From 7b9dd93ee8d834fa011b1a78a6bfc85eebb73e8b Mon Sep 17 00:00:00 2001 From: Carson Long Date: Tue, 14 Jun 2022 22:42:20 -0400 Subject: [PATCH 1/2] Regenerate `go.mod` and vendor again Also sets min go version to go1.18. --- src/go.mod | 39 +- src/go.sum | 93 +- .../go-diodes/.travis.yml | 17 - .../code.cloudfoundry.org/go-diodes/README.md | 6 +- .../go-metric-registry/.golangci.yml | 14 + .../go-metric-registry/README.md | 15 +- .../go-metric-registry/prometheus.go | 33 +- .../testhelpers/spy_registry.go | 60 +- .../go-metric-registry/tools.go | 8 + .../code.cloudfoundry.org/rfc5424/README.md | 2 +- .../tlsconfig/.travis.yml | 12 - .../code.cloudfoundry.org/tlsconfig/config.go | 4 +- .../github.com/armon/go-metrics/inmem.go | 29 +- .../github.com/armon/go-metrics/metrics.go | 25 +- .../hashicorp/go-immutable-radix/.travis.yml | 3 + .../hashicorp/go-immutable-radix/CHANGELOG.md | 9 - .../hashicorp/go-immutable-radix/README.md | 27 +- .../hashicorp/go-immutable-radix/iradix.go | 14 - .../hashicorp/go-immutable-radix/iter.go | 99 +- .../hashicorp/go-immutable-radix/node.go | 12 - .../github.com/hashicorp/go-msgpack/LICENSE | 39 +- .../hashicorp/go-msgpack/codec/0doc.go | 143 + .../hashicorp/go-msgpack/codec/README.md | 174 + .../hashicorp/go-msgpack/codec/binc.go | 1079 +- .../hashicorp/go-msgpack/codec/build.sh | 267 - .../hashicorp/go-msgpack/codec/cbor.go | 767 - .../hashicorp/go-msgpack/codec/codecgen.go | 13 - .../hashicorp/go-msgpack/codec/decode.go | 3475 +- .../hashicorp/go-msgpack/codec/doc.go | 245 - .../hashicorp/go-msgpack/codec/encode.go | 2135 +- .../go-msgpack/codec/fast-path.generated.go | 33668 ---------------- .../go-msgpack/codec/fast-path.go.tmpl | 491 - .../go-msgpack/codec/fast-path.not.go | 47 - .../go-msgpack/codec/gen-dec-array.go.tmpl | 78 - .../go-msgpack/codec/gen-dec-map.go.tmpl | 42 - .../go-msgpack/codec/gen-enc-chan.go.tmpl | 27 - .../go-msgpack/codec/gen-helper.generated.go | 343 - .../go-msgpack/codec/gen-helper.go.tmpl | 308 - .../go-msgpack/codec/gen.generated.go | 164 - .../hashicorp/go-msgpack/codec/gen.go | 2149 - .../codec/goversion_arrayof_gte_go15.go | 14 - .../codec/goversion_arrayof_lt_go15.go | 14 - .../codec/goversion_makemap_gte_go19.go | 15 - .../codec/goversion_makemap_lt_go19.go | 12 - ...version_unexportedembeddedptr_gte_go110.go | 8 - ...oversion_unexportedembeddedptr_lt_go110.go | 8 - .../codec/goversion_unsupported_lt_go14.go | 17 - .../codec/goversion_vendor_eq_go15.go | 10 - .../codec/goversion_vendor_eq_go16.go | 10 - .../codec/goversion_vendor_gte_go17.go | 8 - .../codec/goversion_vendor_lt_go15.go | 8 - .../hashicorp/go-msgpack/codec/helper.go | 3039 +- .../go-msgpack/codec/helper_internal.go | 201 +- .../go-msgpack/codec/helper_not_unsafe.go | 331 - .../go-msgpack/codec/helper_unsafe.go | 745 - .../hashicorp/go-msgpack/codec/json.go | 1491 - .../go-msgpack/codec/mammoth-test.go.tmpl | 154 - .../go-msgpack/codec/mammoth2-test.go.tmpl | 94 - .../hashicorp/go-msgpack/codec/msgpack.go | 1084 +- .../codec/{test.py => msgpack_test.py} | 50 +- .../hashicorp/go-msgpack/codec/rpc.go | 197 +- .../hashicorp/go-msgpack/codec/simple.go | 611 +- .../go-msgpack/codec/test-cbor-goldens.json | 639 - .../hashicorp/go-msgpack/codec/time.go | 193 + .../hashicorp/golang-lru/simplelru/lru.go | 16 - .../golang-lru/simplelru/lru_interface.go | 7 +- .../github.com/nats-io/nats.go/.travis.yml | 2 +- src/vendor/github.com/nats-io/nats.go/.words | 42 +- .../github.com/nats-io/nats.go/.words.readme | 25 + .../github.com/nats-io/nats.go/README.md | 2 +- 
.../github.com/nats-io/nats.go/context.go | 19 +- .../github.com/nats-io/nats.go/go_test.mod | 10 +- .../github.com/nats-io/nats.go/go_test.sum | 24 +- src/vendor/github.com/nats-io/nats.go/js.go | 243 +- src/vendor/github.com/nats-io/nats.go/jsm.go | 106 +- src/vendor/github.com/nats-io/nats.go/kv.go | 131 +- src/vendor/github.com/nats-io/nats.go/nats.go | 94 +- .../github.com/nats-io/nats.go/object.go | 6 +- src/vendor/github.com/nats-io/nats.go/ws.go | 42 +- .../prometheus/collectors/collectors.go | 40 + .../collectors/dbstats_collector.go | 119 + .../collectors/dbstats_collector_go115.go | 31 + .../collectors/dbstats_collector_pre_go115.go | 27 + .../prometheus/collectors/expvar_collector.go | 57 + .../collectors/go_collector_go116.go | 49 + .../collectors/go_collector_latest.go | 91 + .../collectors/process_collector.go | 56 + .../client_golang/prometheus/go_collector.go | 10 +- .../prometheus/go_collector_go116.go | 17 +- ...lector_go117.go => go_collector_latest.go} | 239 +- .../prometheus/internal/go_runtime_metrics.go | 14 +- .../x/tools/go/ast/inspector/typeof.go | 4 - src/vendor/modules.txt | 47 +- 93 files changed, 4863 insertions(+), 51835 deletions(-) delete mode 100644 src/vendor/code.cloudfoundry.org/go-diodes/.travis.yml create mode 100644 src/vendor/code.cloudfoundry.org/go-metric-registry/.golangci.yml create mode 100644 src/vendor/code.cloudfoundry.org/go-metric-registry/tools.go delete mode 100644 src/vendor/code.cloudfoundry.org/tlsconfig/.travis.yml create mode 100644 src/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml delete mode 100644 src/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md create mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go create mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/README.md delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/build.sh delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/cbor.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/codecgen.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/doc.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.generated.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.go.tmpl delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.not.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-array.go.tmpl delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-map.go.tmpl delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/gen-enc-chan.go.tmpl delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.generated.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.go.tmpl delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/gen.generated.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/gen.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_gte_go15.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_lt_go15.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_gte_go19.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_lt_go19.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_gte_go110.go delete mode 100644 
src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_lt_go110.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unsupported_lt_go14.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go15.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go16.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_gte_go17.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_lt_go15.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/helper_not_unsafe.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/helper_unsafe.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/json.go delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/mammoth-test.go.tmpl delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/mammoth2-test.go.tmpl rename src/vendor/github.com/hashicorp/go-msgpack/codec/{test.py => msgpack_test.py} (69%) delete mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/test-cbor-goldens.json create mode 100644 src/vendor/github.com/hashicorp/go-msgpack/codec/time.go create mode 100644 src/vendor/github.com/nats-io/nats.go/.words.readme create mode 100644 src/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go create mode 100644 src/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go create mode 100644 src/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go create mode 100644 src/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go create mode 100644 src/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go create mode 100644 src/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go create mode 100644 src/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go create mode 100644 src/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go rename src/vendor/github.com/prometheus/client_golang/prometheus/{go_collector_go117.go => go_collector_latest.go} (59%) diff --git a/src/go.mod b/src/go.mod index 45558e3e..99f1e1de 100644 --- a/src/go.mod +++ b/src/go.mod @@ -1,15 +1,15 @@ module code.cloudfoundry.org/system-metrics-scraper -go 1.17 +go 1.18 require ( code.cloudfoundry.org/go-envstruct v1.5.0 code.cloudfoundry.org/go-loggregator v0.0.0-20190813173818-049b6bf8152a // pinned - code.cloudfoundry.org/go-metric-registry v0.0.0-20200413202920-40d97c8804ec - code.cloudfoundry.org/tlsconfig v0.0.0-20200131000646-bbe0f8da39b3 + code.cloudfoundry.org/go-metric-registry v0.0.0-20220601181753-a64f39ee2597 + code.cloudfoundry.org/tlsconfig v0.0.0-20211123175040-23cc9f05b6b3 github.com/hashicorp/go-hclog v1.2.1 github.com/hashicorp/raft v1.3.9 - github.com/nats-io/nats.go v1.13.1-0.20220308171302-2f2f6968e98d + github.com/nats-io/nats.go v1.16.0 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.19.0 github.com/prometheus/client_model v0.2.0 @@ -19,43 +19,34 @@ require ( ) require ( - code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee // indirect - code.cloudfoundry.org/rfc5424 v0.0.0-20180905210152-236a6d29298a // indirect - github.com/armon/go-metrics v0.3.3 // indirect + code.cloudfoundry.org/go-diodes 
v0.0.0-20220601181242-ac2da19efd60 // indirect + code.cloudfoundry.org/rfc5424 v0.0.0-20201103192249-000122071b78 // indirect + github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/fatih/color v1.13.0 // indirect github.com/fsnotify/fsnotify v1.4.9 // indirect - github.com/gogo/protobuf v1.3.2 // indirect; pinned + github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/hashicorp/go-immutable-radix v1.2.0 // indirect - github.com/hashicorp/go-msgpack v1.1.5 // indirect - github.com/hashicorp/go-uuid v1.0.2 // indirect - github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/kr/pretty v0.2.0 // indirect - github.com/kr/text v0.2.0 // indirect + github.com/hashicorp/go-immutable-radix v1.0.0 // indirect + github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/hashicorp/golang-lru v0.5.1 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/nats-io/nats-server/v2 v2.7.4 // indirect + github.com/nats-io/nats-server/v2 v2.8.4 // indirect github.com/nats-io/nkeys v0.3.0 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/nxadm/tail v1.4.8 // indirect - github.com/prometheus/client_golang v1.12.1 // indirect + github.com/prometheus/client_golang v1.12.2 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/square/certstrap v1.2.0 // indirect - golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064 // indirect + golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 // indirect golang.org/x/text v0.3.7 // indirect + golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e // indirect google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 // indirect google.golang.org/protobuf v1.27.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect ) - -require ( - github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect - github.com/klauspost/compress v1.15.1 // indirect - golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect - golang.org/x/tools v0.0.0-20210106214847-113979e3529a // indirect -) diff --git a/src/go.sum b/src/go.sum index f2708813..218c767f 100644 --- a/src/go.sum +++ b/src/go.sum @@ -31,24 +31,24 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= code.cloudfoundry.org/go-diodes v0.0.0-20180905200951-72629b5276e3/go.mod h1:Jzi+ccHgo/V/PLQUaQ6hnZcC1c4BS790gx21LRRui4g= -code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee h1:iAAPf9s7/+BIiGf+RjgcXLm3NoZaLIJsBXJuUa63Lx8= -code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee/go.mod h1:Jzi+ccHgo/V/PLQUaQ6hnZcC1c4BS790gx21LRRui4g= +code.cloudfoundry.org/go-diodes v0.0.0-20220601181242-ac2da19efd60 h1:hBycJRDauXKMEAOl90iFjTqrQ4VNGb918x3lLLDXBBU= +code.cloudfoundry.org/go-diodes v0.0.0-20220601181242-ac2da19efd60/go.mod h1:HLP7HKUU1eqMAGMk247yT91tDDi4xxnehkyXh6hGcr0= code.cloudfoundry.org/go-envstruct v1.5.0 
h1:GmDS5Kbckkoxy39JGRwD/13jO1OROqf69B2vy4WBv/k= code.cloudfoundry.org/go-envstruct v1.5.0/go.mod h1:E2S/gzRZpZ51PZnIv7Bo7QvcgH18yio19upkrRk0xLU= code.cloudfoundry.org/go-loggregator v0.0.0-20190813173818-049b6bf8152a h1:OQna3GDyBRifiMC0hp/6l03VPtnepsEhBRMb15V9eq8= code.cloudfoundry.org/go-loggregator v0.0.0-20190813173818-049b6bf8152a/go.mod h1:fYXv8qS6feR/OV1q5N1z/R70y0y4KHZgFSQ8GhWTN1M= -code.cloudfoundry.org/go-metric-registry v0.0.0-20200413202920-40d97c8804ec h1:smbjOXKD09W1ua5vOFt4gn0blVtuG3l3nAxwSwpi6Ts= -code.cloudfoundry.org/go-metric-registry v0.0.0-20200413202920-40d97c8804ec/go.mod h1:cjL7/hjRgOnMy/FbPsGZG9qmR6oSPKG/FwKLLlh2j2U= -code.cloudfoundry.org/rfc5424 v0.0.0-20180905210152-236a6d29298a h1:8rqv2w8xEceNwckcF5ONeRt0qBHlh5bnNfFnYTrZbxs= +code.cloudfoundry.org/go-metric-registry v0.0.0-20220601181753-a64f39ee2597 h1:6Ua4WJp6xfSR3dXMo1k2r1NESqbfLo9T1aCMtpPLjz4= +code.cloudfoundry.org/go-metric-registry v0.0.0-20220601181753-a64f39ee2597/go.mod h1:PbZgZrHBmACUFOQvkrohFXMd17H3qyX/6YU8ZXM7CM8= code.cloudfoundry.org/rfc5424 v0.0.0-20180905210152-236a6d29298a/go.mod h1:tkZo8GtzBjySJ7USvxm4E36lNQw1D3xM6oKHGqdaAJ4= +code.cloudfoundry.org/rfc5424 v0.0.0-20201103192249-000122071b78 h1:mrZQaZmuDIPhSp6b96b+CRKC2uH44ifa5cjDV2epKis= +code.cloudfoundry.org/rfc5424 v0.0.0-20201103192249-000122071b78/go.mod h1:tkZo8GtzBjySJ7USvxm4E36lNQw1D3xM6oKHGqdaAJ4= code.cloudfoundry.org/tlsconfig v0.0.0-20190710180242-462f72de1106/go.mod h1:VxNwgI63VxUGfSR/F3VzjOpnWLbeH+AB1pQugK1i1bA= -code.cloudfoundry.org/tlsconfig v0.0.0-20200131000646-bbe0f8da39b3 h1:2Qal+q+tw/DmDOoJBWwDCPE3lIJNj/1o7oMkkb2c5SI= -code.cloudfoundry.org/tlsconfig v0.0.0-20200131000646-bbe0f8da39b3/go.mod h1:eTbFJpyXRGuFVyg5+oaj9B2eIbIc+0/kZjH8ftbtdew= +code.cloudfoundry.org/tlsconfig v0.0.0-20211123175040-23cc9f05b6b3 h1:fv6SwdaX2uVcQKR8mXZy+6oYdthbKzkgmBzvoxSq/jY= +code.cloudfoundry.org/tlsconfig v0.0.0-20211123175040-23cc9f05b6b3/go.mod h1:CKI5CV+3MlfcohVSuU3FxXubFyC52lYJGMLnZ2ltvks= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -56,9 +56,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77/go.mod h1:bXvGk6IkT1Agy7qzJ+DjIw/SJ1AaB3AvAuMDVV+Vkoo= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= -github.com/armon/go-metrics 
v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro= -github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -83,7 +82,6 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -114,9 +112,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -168,7 +165,6 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -178,30 +174,23 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v1.2.1 h1:YQsLlGDJgwhXFpucSPyVbCBviQtjlHv3jLTlp8YmtEw= github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= 
+github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8= -github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= -github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/raft v1.3.9 h1:9yuo1aR0bFTr1cw7pj3S2Bk6MhJCsnr2NAxvIBrP2x4= github.com/hashicorp/raft v1.3.9/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= @@ -210,21 +199,16 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.14.4/go.mod 
h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.14.4 h1:eijASRJcobkVtSt81Olfh7JX43osYLwy5krOJo6YEu4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= @@ -235,7 +219,6 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= -github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -243,12 +226,11 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296 h1:vU9tpM3apjYlLLeY23zRWJ9Zktr5jp+mloR942LEOpY= -github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= -github.com/nats-io/nats-server/v2 v2.7.4 h1:c+BZJ3rGzUKCBIM4IXO8uNT2u1vajGbD1kPA6wqCEaM= -github.com/nats-io/nats-server/v2 v2.7.4/go.mod h1:1vZ2Nijh8tcyNe8BDVyTviCd9NYzRbubQYiEHsvOQWc= -github.com/nats-io/nats.go v1.13.1-0.20220308171302-2f2f6968e98d h1:zJf4l8Kp67RIZhoVeniSLZs69SHNgjLHz0aNsqPPlx8= -github.com/nats-io/nats.go 
v1.13.1-0.20220308171302-2f2f6968e98d/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a h1:lem6QCvxR0Y28gth9P+wV2K/zYUUAkJ+55U8cpS0p5I= +github.com/nats-io/nats-server/v2 v2.8.4 h1:0jQzze1T9mECg8YZEl8+WYUXb9JKluJfCBriPUtluB4= +github.com/nats-io/nats-server/v2 v2.8.4/go.mod h1:8zZa+Al3WsESfmgSs98Fi06dRWLH5Bnq90m5bKD/eT4= +github.com/nats-io/nats.go v1.16.0 h1:zvLE7fGBQYW6MWaFaRdsgm9qT39PJDQoju+DS8KsO1g= +github.com/nats-io/nats.go v1.16.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= @@ -259,18 +241,13 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.9.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.6.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -284,12 +261,11 @@ github.com/poy/eachers v0.0.0-20181020210610-23942921fe77/go.mod h1:x1vqpbcMW9T/ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= +github.com/prometheus/client_golang 
v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -298,7 +274,6 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= @@ -308,7 +283,6 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= @@ -350,9 +324,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064 h1:S25/rfnfsMVgORT4/J61MJ7rdyseOZOyvLIrZEZ7s6s= -golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -416,9 +389,7 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -444,7 +415,6 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181128092732-4ed8d59d0b35/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -455,7 +425,6 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -490,7 +459,6 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 h1:nonptSpoQ4vQjyraW20DXPAglgQfVnM9ZC6MmNLMR60= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -507,9 +475,7 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 h1:M73Iuj3xbbb9Uk1DYhzydthsj6oOd6l9bpuFcNoUvTs= -golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 h1:GZokNIeuVkl3aZHJchRrr13WCsols02MLUcz1U9is6M= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -517,7 +483,6 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -549,13 +514,11 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/src/vendor/code.cloudfoundry.org/go-diodes/.travis.yml 
b/src/vendor/code.cloudfoundry.org/go-diodes/.travis.yml
deleted file mode 100644
index a9992c4d..00000000
--- a/src/vendor/code.cloudfoundry.org/go-diodes/.travis.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-language: go
-
-go:
-  - 1.x
-  - master
-
-install: |
-  mkdir -p $HOME/gopath/src/code.cloudfoundry.org/go-diodes
-  rsync -az ${TRAVIS_BUILD_DIR}/ $HOME/gopath/src/code.cloudfoundry.org/go-diodes/
-  export TRAVIS_BUILD_DIR=$GOPATH/src/code.cloudfoundry.org/go-diodes
-  go get -t -d -v code.cloudfoundry.org/go-diodes/...
-
-script: go test code.cloudfoundry.org/go-diodes/... --race
-
-matrix:
-  allow_failures:
-    - go: master
diff --git a/src/vendor/code.cloudfoundry.org/go-diodes/README.md b/src/vendor/code.cloudfoundry.org/go-diodes/README.md
index ff3ae457..9f02b4df 100644
--- a/src/vendor/code.cloudfoundry.org/go-diodes/README.md
+++ b/src/vendor/code.cloudfoundry.org/go-diodes/README.md
@@ -1,6 +1,8 @@
 ![diode][diode-logo]
 
-[![GoDoc][go-doc-badge]][go-doc] [![travis][travis-badge]][travis]
+[![GoDoc][go-doc-badge]][go-doc]
+
+If you have any questions, or want to get attention for a PR or issue please reach out on the [#logging-and-metrics channel in the cloudfoundry slack](https://cloudfoundry.slack.com/archives/CUW93AF3M)
 
 Diodes are ring buffers manipulated via atomics.
 
@@ -155,5 +157,3 @@ encounter this issue.
 [diode-logo]: https://raw.githubusercontent.com/cloudfoundry/go-diodes/gh-pages/diode-logo.png
 [go-doc-badge]: https://godoc.org/code.cloudfoundry.org/go-diodes?status.svg
 [go-doc]: https://godoc.org/code.cloudfoundry.org/go-diodes
-[travis-badge]: https://travis-ci.org/cloudfoundry/go-diodes.svg?branch=master
-[travis]: https://travis-ci.org/cloudfoundry/go-diodes?branch=master
diff --git a/src/vendor/code.cloudfoundry.org/go-metric-registry/.golangci.yml b/src/vendor/code.cloudfoundry.org/go-metric-registry/.golangci.yml
new file mode 100644
index 00000000..53f67f41
--- /dev/null
+++ b/src/vendor/code.cloudfoundry.org/go-metric-registry/.golangci.yml
@@ -0,0 +1,14 @@
+linters:
+  enable:
+    # Checks for non-ASCII identifiers
+    - asciicheck
+    # Computes and checks the cyclomatic complexity of functions.
+    - gocyclo
+    # Inspects source code for security problems.
+    - gosec
+
+issues:
+  # Disable max issues per linter.
+  max-issues-per-linter: 0
+  # Disable max same issues.
+  max-same-issues: 0
diff --git a/src/vendor/code.cloudfoundry.org/go-metric-registry/README.md b/src/vendor/code.cloudfoundry.org/go-metric-registry/README.md
index f169059b..b03f8b47 100644
--- a/src/vendor/code.cloudfoundry.org/go-metric-registry/README.md
+++ b/src/vendor/code.cloudfoundry.org/go-metric-registry/README.md
@@ -1,15 +1,18 @@
 # go-metric-registry
 
-[![slack.cloudfoundry.org][slack-badge]][loggregator-slack]
+[![GoDoc][go-doc-badge]][go-doc]
 
-This is a golang client library wrapping Prometheus registry with help text required by default.
+This is a Golang library wrapping Prometheus.
+
+If you have any questions, or want to get attention for a PR or issue please reach out on the [#logging-and-metrics channel in the cloudfoundry slack](https://cloudfoundry.slack.com/archives/CUW93AF3M)
 
 ## Usage
 
 This repository should be imported as:
 
-`import loggregator "code.cloudfoundry.org/go-metric-registry"`
+`import metrics "code.cloudfoundry.org/go-metric-registry"`
+
+An example app can be referenced in [the examples directory](https://github.com/cloudfoundry/go-metric-registry/blob/main/examples/main.go).
 
-An example app can be referenced in [the examples directory](https://github.com/cloudfoundry/go-metric-registry/blob/master/examples/main.go).
+[go-doc-badge]: https://godoc.org/code.cloudfoundry.org/go-metric-registry?status.svg
+[go-doc]: https://godoc.org/code.cloudfoundry.org/go-metric-registry
-[slack-badge]: https://slack.cloudfoundry.org/badge.svg
-[loggregator-slack]: https://cloudfoundry.slack.com/archives/loggregator
\ No newline at end of file
diff --git a/src/vendor/code.cloudfoundry.org/go-metric-registry/prometheus.go b/src/vendor/code.cloudfoundry.org/go-metric-registry/prometheus.go
index 9ae65850..d7ee1974 100644
--- a/src/vendor/code.cloudfoundry.org/go-metric-registry/prometheus.go
+++ b/src/vendor/code.cloudfoundry.org/go-metric-registry/prometheus.go
@@ -11,6 +11,7 @@ import (
 
 	"code.cloudfoundry.org/tlsconfig"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/collectors"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 )
 
@@ -57,15 +58,17 @@ func NewRegistry(logger *log.Logger, opts ...RegistryOption) *Registry {
 	registry := prometheus.NewRegistry()
 	pr.registerer = registry
 
-	pr.registerer.MustRegister(prometheus.NewGoCollector())
-	pr.registerer.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
-
 	pr.mux.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{
 		Registry: pr.registerer,
 	}))
 
 	return pr
 }
 
+func (p *Registry) RegisterDebugMetrics() {
+	p.registerer.MustRegister(collectors.NewGoCollector())
+	p.registerer.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))
+}
+
 // Creates new counter. When a duplicate is registered, the Registry will return
 // the previously created metric.
 func (p *Registry) NewCounter(name, helpText string, opts ...MetricOption) Counter {
@@ -89,6 +92,18 @@ func (p *Registry) NewHistogram(name, helpText string, buckets []float64, opts .
 	return p.registerCollector(name, h).(Histogram)
 }
 
+func (p *Registry) RemoveGauge(g Gauge) {
+	p.registerer.Unregister(g.(prometheus.Collector))
+}
+
+func (p *Registry) RemoveHistogram(h Histogram) {
+	p.registerer.Unregister(h.(prometheus.Collector))
+}
+
+func (p *Registry) RemoveCounter(c Counter) {
+	p.registerer.Unregister(c.(prometheus.Collector))
+}
+
 func (p *Registry) registerCollector(name string, c prometheus.Collector) prometheus.Collector {
 	err := p.registerer.Register(c)
 	if err != nil {
@@ -164,8 +179,8 @@ func (p *Registry) start(ipAddr string, port int) {
 	s := http.Server{
 		Addr:         addr,
 		Handler:      p.mux,
-		ReadTimeout:  5 * time.Second,
-		WriteTimeout: 5 * time.Second,
+		ReadTimeout:  5 * time.Minute,
+		WriteTimeout: 5 * time.Minute,
 	}
 
 	lis, err := net.Listen("tcp", addr)
@@ -177,7 +192,7 @@ func (p *Registry) start(ipAddr string, port int) {
 	parts := strings.Split(lis.Addr().String(), ":")
 	p.port = parts[len(parts)-1]
 
-	go s.Serve(lis)
+	go s.Serve(lis) //nolint:errcheck
 }
 
 func (p *Registry) startTLS(port int, certFile, keyFile, caFile string) {
@@ -196,8 +211,8 @@ func (p *Registry) startTLS(port int, certFile, keyFile, caFile string) {
 		Addr:         addr,
 		Handler:      p.mux,
 		TLSConfig:    tlsConfig,
-		ReadTimeout:  5 * time.Second,
-		WriteTimeout: 5 * time.Second,
+		ReadTimeout:  5 * time.Minute,
+		WriteTimeout: 5 * time.Minute,
 	}
 
 	lis, err := tls.Listen("tcp", addr, tlsConfig)
@@ -209,7 +224,7 @@ func (p *Registry) startTLS(port int, certFile, keyFile, caFile string) {
 	parts := strings.Split(lis.Addr().String(), ":")
 	p.port = parts[len(parts)-1]
 
-	go s.Serve(lis)
+	go s.Serve(lis) //nolint:errcheck
 }
 
 // Options applied to metrics on creation
diff --git a/src/vendor/code.cloudfoundry.org/go-metric-registry/testhelpers/spy_registry.go b/src/vendor/code.cloudfoundry.org/go-metric-registry/testhelpers/spy_registry.go
index b782164b..c38bfcfe 100644
--- a/src/vendor/code.cloudfoundry.org/go-metric-registry/testhelpers/spy_registry.go
+++ b/src/vendor/code.cloudfoundry.org/go-metric-registry/testhelpers/spy_registry.go
@@ -10,8 +10,9 @@ import (
 )
 
 type SpyMetricsRegistry struct {
-	mu      sync.Mutex
-	Metrics map[string]*SpyMetric
+	mu           sync.Mutex
+	Metrics      map[string]*SpyMetric
+	debugMetrics bool
 }
 
 func NewMetricsRegistry() *SpyMetricsRegistry {
@@ -20,6 +21,12 @@ func NewMetricsRegistry() *SpyMetricsRegistry {
 	}
 }
 
+func (s *SpyMetricsRegistry) RegisterDebugMetrics() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.debugMetrics = true
+}
+
 func (s *SpyMetricsRegistry) NewCounter(name, helpText string, opts ...metrics.MetricOption) metrics.Counter {
 	s.mu.Lock()
 	defer s.mu.Unlock()
@@ -45,11 +52,27 @@ func (s *SpyMetricsRegistry) NewHistogram(name, helpText string, buckets []float
 	defer s.mu.Unlock()
 
 	m := newSpyMetric(name, helpText, opts)
+	m.buckets = buckets
 	m = s.addMetric(m)
 
 	return m
 }
 
+func (p *SpyMetricsRegistry) RemoveGauge(g metrics.Gauge) {
+	sm := g.(*SpyMetric)
+	p.removeMetric(sm)
+}
+
+func (p *SpyMetricsRegistry) RemoveHistogram(h metrics.Histogram) {
+	sm := h.(*SpyMetric)
+	p.removeMetric(sm)
+}
+
+func (p *SpyMetricsRegistry) RemoveCounter(c metrics.Counter) {
+	sm := c.(*SpyMetric)
+	p.removeMetric(sm)
+}
+
 func (s *SpyMetricsRegistry) addMetric(sm *SpyMetric) *SpyMetric {
 	n := getMetricName(sm.name, sm.Opts.ConstLabels)
 
@@ -61,6 +84,18 @@ func (s *SpyMetricsRegistry) addMetric(sm *SpyMetric) *SpyMetric {
 	return s.Metrics[n]
 }
 
+func (s *SpyMetricsRegistry) removeMetric(sm *SpyMetric) {
+	n := getMetricName(sm.name, sm.Opts.ConstLabels)
+
+	delete(s.Metrics, n)
+}
+
+func (s *SpyMetricsRegistry) GetDebugMetricsEnabled() bool {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.debugMetrics
+}
+
 func (s *SpyMetricsRegistry) GetMetric(name string, tags map[string]string) *SpyMetric {
 	s.mu.Lock()
 	defer s.mu.Unlock()
@@ -99,7 +134,7 @@ func (s *SpyMetricsRegistry) HasMetric(name string, tags map[string]string) bool
 
 func newSpyMetric(name, helpText string, opts []metrics.MetricOption) *SpyMetric {
 	sm := &SpyMetric{
-		name: name,
+		name:     name,
 		helpText: helpText,
 		Opts: &prometheus.Opts{
 			ConstLabels: make(prometheus.Labels),
@@ -110,7 +145,7 @@ func newSpyMetric(name, helpText string, opts []metrics.MetricOption) *SpyMetric
 		o(sm.Opts)
 	}
 
-	for k, _ := range sm.Opts.ConstLabels {
+	for k := range sm.Opts.ConstLabels {
 		sm.keys = append(sm.keys, k)
 	}
 	sort.Strings(sm.keys)
@@ -119,13 +154,14 @@ func newSpyMetric(name, helpText string, opts []metrics.MetricOption) *SpyMetric
 }
 
 type SpyMetric struct {
-	mu    sync.Mutex
-	value float64
-	name  string
+	mu      sync.Mutex
+	value   float64
+	buckets []float64
+	name    string
 
 	helpText string
-	keys []string
-	Opts *prometheus.Opts
+	keys     []string
+	Opts     *prometheus.Opts
 }
 
 func (s *SpyMetric) Set(c float64) {
@@ -158,6 +194,12 @@ func (s *SpyMetric) HelpText() string {
 	return s.helpText
 }
 
+func (s *SpyMetric) Buckets() []float64 {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.buckets
+}
+
 func getMetricName(name string, tags map[string]string) string {
 	n := name
 
diff --git a/src/vendor/code.cloudfoundry.org/go-metric-registry/tools.go b/src/vendor/code.cloudfoundry.org/go-metric-registry/tools.go
new file mode 100644
index 00000000..434c010c
--- /dev/null
+++ b/src/vendor/code.cloudfoundry.org/go-metric-registry/tools.go
@@ -0,0 +1,8 @@
+//go:build tools
+// +build tools
+
+package metrics
+
+import (
+	_ "github.com/onsi/ginkgo/ginkgo"
+)
diff --git a/src/vendor/code.cloudfoundry.org/rfc5424/README.md b/src/vendor/code.cloudfoundry.org/rfc5424/README.md
index 990975b5..2f332fdd 100644
--- a/src/vendor/code.cloudfoundry.org/rfc5424/README.md
+++ b/src/vendor/code.cloudfoundry.org/rfc5424/README.md
@@ -1,4 +1,4 @@
-
+# NOTE: This repository is deprecated in favor of [go-loggregator](https://github.com/cloudfoundry/go-loggregator/tree/master/rfc5424)
 
 [![Build Status](https://travis-ci.org/crewjam/rfc5424.png)](https://travis-ci.org/crewjam/rfc5424)
 [![](https://godoc.org/github.com/crewjam/rfc5424?status.png)](http://godoc.org/github.com/crewjam/rfc5424)
diff --git a/src/vendor/code.cloudfoundry.org/tlsconfig/.travis.yml b/src/vendor/code.cloudfoundry.org/tlsconfig/.travis.yml
deleted file mode 100644
index 22cdd220..00000000
--- a/src/vendor/code.cloudfoundry.org/tlsconfig/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-language: go
-
-go:
-- "1.x"
-- "master"
-
-install: true
-
-script:
-- env GO111MODULE=on go build ./...
-- env GO111MODULE=on go test ./...
diff --git a/src/vendor/code.cloudfoundry.org/tlsconfig/config.go b/src/vendor/code.cloudfoundry.org/tlsconfig/config.go
index 05f4da2b..1dc787ff 100644
--- a/src/vendor/code.cloudfoundry.org/tlsconfig/config.go
+++ b/src/vendor/code.cloudfoundry.org/tlsconfig/config.go
@@ -86,7 +86,7 @@ func (c Config) Client(opts ...ClientOption) (*tls.Config, error) {
 func WithExternalServiceDefaults() TLSOption {
 	return func(c *tls.Config) error {
 		c.MinVersion = tls.VersionTLS12
-		c.MaxVersion = tls.VersionTLS12
+		c.MaxVersion = tls.VersionTLS13
 		c.PreferServerCipherSuites = false
 		c.CipherSuites = []uint16{
 			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
@@ -115,7 +115,7 @@ func WithExternalServiceDefaults() TLSOption {
 func WithInternalServiceDefaults() TLSOption {
 	return func(c *tls.Config) error {
 		c.MinVersion = tls.VersionTLS12
-		c.MaxVersion = tls.VersionTLS12
+		c.MaxVersion = tls.VersionTLS13
 		c.PreferServerCipherSuites = true
 		c.CipherSuites = []uint16{
 			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
diff --git a/src/vendor/github.com/armon/go-metrics/inmem.go b/src/vendor/github.com/armon/go-metrics/inmem.go
index e8206daa..93b0e0ad 100644
--- a/src/vendor/github.com/armon/go-metrics/inmem.go
+++ b/src/vendor/github.com/armon/go-metrics/inmem.go
@@ -10,8 +10,6 @@ import (
 	"time"
 )
 
-var spaceReplacer = strings.NewReplacer(" ", "_")
-
 // InmemSink provides a MetricSink that does in-memory aggregation
 // without sending metrics over a network. It can be embedded within
 // an application to provide profiling information.
@@ -314,21 +312,36 @@ func (i *InmemSink) getInterval() *IntervalMetrics {
 // Flattens the key for formatting, removes spaces
 func (i *InmemSink) flattenKey(parts []string) string {
 	buf := &bytes.Buffer{}
+	replacer := strings.NewReplacer(" ", "_")
 
-	joined := strings.Join(parts, ".")
-
-	spaceReplacer.WriteString(buf, joined)
+	if len(parts) > 0 {
+		replacer.WriteString(buf, parts[0])
+	}
+	for _, part := range parts[1:] {
+		replacer.WriteString(buf, ".")
+		replacer.WriteString(buf, part)
+	}
 
 	return buf.String()
 }
 
 // Flattens the key for formatting along with its labels, removes spaces
 func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) {
-	key := i.flattenKey(parts)
-	buf := bytes.NewBufferString(key)
+	buf := &bytes.Buffer{}
+	replacer := strings.NewReplacer(" ", "_")
+
+	if len(parts) > 0 {
+		replacer.WriteString(buf, parts[0])
+	}
+	for _, part := range parts[1:] {
+		replacer.WriteString(buf, ".")
+		replacer.WriteString(buf, part)
+	}
+
+	key := buf.String()
 
 	for _, label := range labels {
-		spaceReplacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value))
+		replacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value))
 	}
 
 	return buf.String(), key
diff --git a/src/vendor/github.com/armon/go-metrics/metrics.go b/src/vendor/github.com/armon/go-metrics/metrics.go
index 457b74bb..4920d683 100644
--- a/src/vendor/github.com/armon/go-metrics/metrics.go
+++ b/src/vendor/github.com/armon/go-metrics/metrics.go
@@ -269,25 +269,10 @@ func (m *Metrics) emitRuntimeStats() {
 	m.lastNumGC = num
 }
 
-// Creates a new slice with the provided string value as the first element
-// and the provided slice values as the remaining values.
-// Ordering of the values in the provided input slice is kept in tact in the output slice.
+// Inserts a string value at an index into the slice
 func insert(i int, v string, s []string) []string {
-	// Allocate new slice to avoid modifying the input slice
-	newS := make([]string, len(s)+1)
-
-	// Copy s[0, i-1] into newS
-	for j := 0; j < i; j++ {
-		newS[j] = s[j]
-	}
-
-	// Insert provided element at index i
-	newS[i] = v
-
-	// Copy s[i, len(s)-1] into newS starting at newS[i+1]
-	for j := i; j < len(s); j++ {
-		newS[j+1] = s[j]
-	}
-
-	return newS
+	s = append(s, "")
+	copy(s[i+1:], s[i:])
+	s[i] = v
+	return s
 }
diff --git a/src/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml b/src/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml
new file mode 100644
index 00000000..1a0bbea6
--- /dev/null
+++ b/src/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml
@@ -0,0 +1,3 @@
+language: go
+go:
+  - tip
diff --git a/src/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md b/src/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
deleted file mode 100644
index a967ae45..00000000
--- a/src/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# UNRELEASED
-
-FEATURES
-
-* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)]
-
-# 1.0.0 (August 30th, 2018)
-
-* go mod adopted
diff --git a/src/vendor/github.com/hashicorp/go-immutable-radix/README.md b/src/vendor/github.com/hashicorp/go-immutable-radix/README.md
index aca15a64..8910fcc0 100644
--- a/src/vendor/github.com/hashicorp/go-immutable-radix/README.md
+++ b/src/vendor/github.com/hashicorp/go-immutable-radix/README.md
@@ -1,4 +1,4 @@
-go-immutable-radix [![CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master)
+go-immutable-radix [![Build Status](https://travis-ci.org/hashicorp/go-immutable-radix.png)](https://travis-ci.org/hashicorp/go-immutable-radix)
 =========
 
 Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
@@ -39,28 +39,3 @@ if string(m) != "foo" {
 }
 ```
 
-Here is an example of performing a range scan of the keys.
-
-```go
-// Create a tree
-r := iradix.New()
-r, _, _ = r.Insert([]byte("001"), 1)
-r, _, _ = r.Insert([]byte("002"), 2)
-r, _, _ = r.Insert([]byte("005"), 5)
-r, _, _ = r.Insert([]byte("010"), 10)
-r, _, _ = r.Insert([]byte("100"), 10)
-
-// Range scan over the keys that sort lexicographically between [003, 050)
-it := r.Root().Iterator()
-it.SeekLowerBound([]byte("003"))
-for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
-	if key >= "050" {
-		break
-	}
-	fmt.Println(key)
-}
-// Output:
-// 005
-// 010
-```
-
diff --git a/src/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/src/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
index 168bda76..e5e6e57f 100644
--- a/src/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
+++ b/src/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
@@ -86,20 +86,6 @@ func (t *Tree) Txn() *Txn {
 	return txn
 }
 
-// Clone makes an independent copy of the transaction. The new transaction
-// does not track any nodes and has TrackMutate turned off. The cloned transaction will contain any uncommitted writes in the original transaction but further mutations to either will be independent and result in different radix trees on Commit. A cloned transaction may be passed to another goroutine and mutated there independently however each transaction may only be mutated in a single thread.
-func (t *Txn) Clone() *Txn {
-	// reset the writable node cache to avoid leaking future writes into the clone
-	t.writable = nil
-
-	txn := &Txn{
-		root: t.root,
-		snap: t.snap,
-		size: t.size,
-	}
-	return txn
-}
-
 // TrackMutate can be used to toggle if mutations are tracked. If this is enabled
 // then notifications will be issued for affected internal nodes and leaves when
 // the transaction is committed.
diff --git a/src/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/src/vendor/github.com/hashicorp/go-immutable-radix/iter.go
index 1ecaf831..9815e025 100644
--- a/src/vendor/github.com/hashicorp/go-immutable-radix/iter.go
+++ b/src/vendor/github.com/hashicorp/go-immutable-radix/iter.go
@@ -1,8 +1,6 @@
 package iradix
 
-import (
-	"bytes"
-)
+import "bytes"
 
 // Iterator is used to iterate over a set of nodes
 // in pre-order
@@ -55,101 +53,6 @@ func (i *Iterator) SeekPrefix(prefix []byte) {
 	i.SeekPrefixWatch(prefix)
 }
 
-func (i *Iterator) recurseMin(n *Node) *Node {
-	// Traverse to the minimum child
-	if n.leaf != nil {
-		return n
-	}
-	if len(n.edges) > 0 {
-		// Add all the other edges to the stack (the min node will be added as
-		// we recurse)
-		i.stack = append(i.stack, n.edges[1:])
-		return i.recurseMin(n.edges[0].node)
-	}
-	// Shouldn't be possible
-	return nil
-}
-
-// SeekLowerBound is used to seek the iterator to the smallest key that is
-// greater or equal to the given key. There is no watch variant as it's hard to
-// predict based on the radix structure which node(s) changes might affect the
-// result.
-func (i *Iterator) SeekLowerBound(key []byte) {
-	// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
-	// go because we need only a subset of edges of many nodes in the path to the
-	// leaf with the lower bound.
-	i.stack = []edges{}
-	n := i.node
-	search := key
-
-	found := func(n *Node) {
-		i.node = n
-		i.stack = append(i.stack, edges{edge{node: n}})
-	}
-
-	for {
-		// Compare current prefix with the search key's same-length prefix.
-		var prefixCmp int
-		if len(n.prefix) < len(search) {
-			prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
-		} else {
-			prefixCmp = bytes.Compare(n.prefix, search)
-		}
-
-		if prefixCmp > 0 {
-			// Prefix is larger, that means the lower bound is greater than the search
-			// and from now on we need to follow the minimum path to the smallest
-			// leaf under this subtree.
-			n = i.recurseMin(n)
-			if n != nil {
-				found(n)
-			}
-			return
-		}
-
-		if prefixCmp < 0 {
-			// Prefix is smaller than search prefix, that means there is no lower
-			// bound
-			i.node = nil
-			return
-		}
-
-		// Prefix is equal, we are still heading for an exact match. If this is a
-		// leaf we're done.
-		if n.leaf != nil {
-			if bytes.Compare(n.leaf.key, key) < 0 {
-				i.node = nil
-				return
-			}
-			found(n)
-			return
-		}
-
-		// Consume the search prefix
-		if len(n.prefix) > len(search) {
-			search = []byte{}
-		} else {
-			search = search[len(n.prefix):]
-		}
-
-		// Otherwise, take the lower bound next edge.
-		idx, lbNode := n.getLowerBoundEdge(search[0])
-		if lbNode == nil {
-			i.node = nil
-			return
-		}
-
-		// Create stack edges for the all strictly higher edges in this node.
- if idx+1 < len(n.edges) { - i.stack = append(i.stack, n.edges[idx+1:]) - } - - i.node = lbNode - // Recurse - n = lbNode - } -} - // Next returns the next node in order func (i *Iterator) Next() ([]byte, interface{}, bool) { // Initialize our stack if needed diff --git a/src/vendor/github.com/hashicorp/go-immutable-radix/node.go b/src/vendor/github.com/hashicorp/go-immutable-radix/node.go index 3ab904ed..7a065e7a 100644 --- a/src/vendor/github.com/hashicorp/go-immutable-radix/node.go +++ b/src/vendor/github.com/hashicorp/go-immutable-radix/node.go @@ -79,18 +79,6 @@ func (n *Node) getEdge(label byte) (int, *Node) { return -1, nil } -func (n *Node) getLowerBoundEdge(label byte) (int, *Node) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - // we want lower bound behavior so return even if it's not an exact match - if idx < num { - return idx, n.edges[idx].node - } - return -1, nil -} - func (n *Node) delEdge(label byte) { num := len(n.edges) idx := sort.Search(num, func(i int) bool { diff --git a/src/vendor/github.com/hashicorp/go-msgpack/LICENSE b/src/vendor/github.com/hashicorp/go-msgpack/LICENSE index 95a0f054..ccae99f6 100644 --- a/src/vendor/github.com/hashicorp/go-msgpack/LICENSE +++ b/src/vendor/github.com/hashicorp/go-msgpack/LICENSE @@ -1,22 +1,25 @@ -The MIT License (MIT) - -Copyright (c) 2012-2015 Ugorji Nwoke. +Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the author nor the names of its contributors may be used + to endorse or promote products derived from this software + without specific prior written permission. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go new file mode 100644 index 00000000..c14d810a --- /dev/null +++ b/src/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go @@ -0,0 +1,143 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +/* +High Performance, Feature-Rich Idiomatic Go encoding library for msgpack and binc . + +Supported Serialization formats are: + + - msgpack: [https://github.com/msgpack/msgpack] + - binc: [http://github.com/ugorji/binc] + +To install: + + go get github.com/ugorji/go/codec + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. + This was achieved by taking extreme care on: + - managing allocation + - function frame size (important due to Go's use of split stacks), + - reflection use (and by-passing reflection for common types) + - recursion implications + - zero-copy mode (encoding/decoding to byte slice without using temp buffers) + - Correct. + Care was taken to precisely handle corner cases like: + overflows, nil maps and slices, nil value in stream, etc. + - Efficient zero-copying into temporary byte buffers + when encoding into or decoding from a byte slice. + - Standard field renaming via tags + - Encoding from any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Decoding into pointer to any non-nil typed value + (struct, slice, map, int, float32, bool, string, reflect.Value, etc) + - Supports extension functions to handle the encode/decode of custom types + - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler + - Schema-less decoding + (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Msgpack Specific: + - Provides extension functions to handle spec-defined extensions (binary, timestamp) + - Options to resolve ambiguities in handling raw bytes (as string or []byte) + during schema-less decoding (decoding into a nil interface{}) + - RPC Server/Client Codec for msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + - Fast Paths for some container types: + For some container types, we circumvent reflection and its associated overhead + and allocation costs, and encode/decode directly. 
These types are: + []interface{} + []int + []string + map[interface{}]interface{} + map[int]interface{} + map[string]interface{} + +Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +Usage + +Typical usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. for msgpack, define functions and enable Time support for tag 1 + // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +Representative Benchmark Results + +Run the benchmark suite using: + go test -bi -bench=. -benchmem + +To run full benchmark suite (including against vmsgpack and bson), +see notes in ext_dep_test.go + +*/ +package codec diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/README.md b/src/vendor/github.com/hashicorp/go-msgpack/codec/README.md new file mode 100644 index 00000000..6c95d1bf --- /dev/null +++ b/src/vendor/github.com/hashicorp/go-msgpack/codec/README.md @@ -0,0 +1,174 @@ +# Codec + +High Performance and Feature-Rich Idiomatic Go Library providing +encode/decode support for different serialization formats. + +Supported Serialization formats are: + + - msgpack: [https://github.com/msgpack/msgpack] + - binc: [http://github.com/ugorji/binc] + +To install: + + go get github.com/ugorji/go/codec + +Online documentation: [http://godoc.org/github.com/ugorji/go/codec] + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. + This was achieved by taking extreme care on: + - managing allocation + - function frame size (important due to Go's use of split stacks), + - reflection use (and by-passing reflection for common types) + - recursion implications + - zero-copy mode (encoding/decoding to byte slice without using temp buffers) + - Correct. 
+ Care was taken to precisely handle corner cases like: + overflows, nil maps and slices, nil value in stream, etc. + - Efficient zero-copying into temporary byte buffers + when encoding into or decoding from a byte slice. + - Standard field renaming via tags + - Encoding from any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Decoding into pointer to any non-nil typed value + (struct, slice, map, int, float32, bool, string, reflect.Value, etc) + - Supports extension functions to handle the encode/decode of custom types + - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler + - Schema-less decoding + (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Msgpack Specific: + - Provides extension functions to handle spec-defined extensions (binary, timestamp) + - Options to resolve ambiguities in handling raw bytes (as string or []byte) + during schema-less decoding (decoding into a nil interface{}) + - RPC Server/Client Codec for msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + - Fast Paths for some container types: + For some container types, we circumvent reflection and its associated overhead + and allocation costs, and encode/decode directly. These types are: + []interface{} + []int + []string + map[interface{}]interface{} + map[int]interface{} + map[string]interface{} + +## Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +## RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +## Usage + +Typical usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. 
for msgpack, define functions and enable Time support for tag 1 + // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +## Representative Benchmark Results + +A sample run of benchmark using "go test -bi -bench=. -benchmem": + + /proc/cpuinfo: Intel(R) Core(TM) i7-2630QM CPU @ 2.00GHz (HT) + + .............................................. + BENCHMARK INIT: 2013-10-16 11:02:50.345970786 -0400 EDT + To run full benchmark comparing encodings (MsgPack, Binc, JSON, GOB, etc), use: "go test -bench=." + Benchmark: + Struct recursive Depth: 1 + ApproxDeepSize Of benchmark Struct: 4694 bytes + Benchmark One-Pass Run: + v-msgpack: len: 1600 bytes + bson: len: 3025 bytes + msgpack: len: 1560 bytes + binc: len: 1187 bytes + gob: len: 1972 bytes + json: len: 2538 bytes + .............................................. + PASS + Benchmark__Msgpack____Encode 50000 54359 ns/op 14953 B/op 83 allocs/op + Benchmark__Msgpack____Decode 10000 106531 ns/op 14990 B/op 410 allocs/op + Benchmark__Binc_NoSym_Encode 50000 53956 ns/op 14966 B/op 83 allocs/op + Benchmark__Binc_NoSym_Decode 10000 103751 ns/op 14529 B/op 386 allocs/op + Benchmark__Binc_Sym___Encode 50000 65961 ns/op 17130 B/op 88 allocs/op + Benchmark__Binc_Sym___Decode 10000 106310 ns/op 15857 B/op 287 allocs/op + Benchmark__Gob________Encode 10000 135944 ns/op 21189 B/op 237 allocs/op + Benchmark__Gob________Decode 5000 405390 ns/op 83460 B/op 1841 allocs/op + Benchmark__Json_______Encode 20000 79412 ns/op 13874 B/op 102 allocs/op + Benchmark__Json_______Decode 10000 247979 ns/op 14202 B/op 493 allocs/op + Benchmark__Bson_______Encode 10000 121762 ns/op 27814 B/op 514 allocs/op + Benchmark__Bson_______Decode 10000 162126 ns/op 16514 B/op 789 allocs/op + Benchmark__VMsgpack___Encode 50000 69155 ns/op 12370 B/op 344 allocs/op + Benchmark__VMsgpack___Decode 10000 151609 ns/op 20307 B/op 571 allocs/op + ok ugorji.net/codec 30.827s + +To run full benchmark suite (including against vmsgpack and bson), +see notes in ext\_dep\_test.go + diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/binc.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/binc.go index fd9f4890..2bb5e8fe 100644 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/binc.go +++ b/src/vendor/github.com/hashicorp/go-msgpack/codec/binc.go @@ -1,16 +1,20 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. 
package codec import ( "math" - "reflect" + // "reflect" + // "sync/atomic" "time" + //"fmt" ) const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. +//var _ = fmt.Printf + // vd as low 4 bits (there are 16 slots) const ( bincVdSpecial byte = iota @@ -55,79 +59,31 @@ const ( // others not currently supported ) -func bincdesc(vd, vs byte) string { - switch vd { - case bincVdSpecial: - switch vs { - case bincSpNil: - return "nil" - case bincSpFalse: - return "false" - case bincSpTrue: - return "true" - case bincSpNan, bincSpPosInf, bincSpNegInf, bincSpZeroFloat: - return "float" - case bincSpZero: - return "uint" - case bincSpNegOne: - return "int" - default: - return "unknown" - } - case bincVdSmallInt, bincVdPosInt: - return "uint" - case bincVdNegInt: - return "int" - case bincVdFloat: - return "float" - case bincVdSymbol: - return "string" - case bincVdString: - return "string" - case bincVdByteArray: - return "bytes" - case bincVdTimestamp: - return "time" - case bincVdCustomExt: - return "ext" - case bincVdArray: - return "array" - case bincVdMap: - return "map" - default: - return "unknown" - } -} - type bincEncDriver struct { - e *Encoder - h *BincHandle - w *encWriterSwitch + w encWriter m map[string]uint16 // symbols - b [16]byte // scratch, used for encoding numbers - bigendian style - s uint16 // symbols sequencer - // c containerState - encDriverTrackContainerWriter - noBuiltInTypes - // encNoSeparator - _ [1]uint64 // padding + s uint32 // symbols sequencer + b [8]byte } -func (e *bincEncDriver) EncodeNil() { - e.w.writen1(bincVdSpecial<<4 | bincSpNil) +func (e *bincEncDriver) isBuiltinType(rt uintptr) bool { + return rt == timeTypId } -func (e *bincEncDriver) EncodeTime(t time.Time) { - if t.IsZero() { - e.EncodeNil() - } else { - bs := bincEncodeTime(t) +func (e *bincEncDriver) encodeBuiltin(rt uintptr, v interface{}) { + switch rt { + case timeTypId: + bs := encodeTime(v.(time.Time)) e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) e.w.writeb(bs) } } -func (e *bincEncDriver) EncodeBool(b bool) { +func (e *bincEncDriver) encodeNil() { + e.w.writen1(bincVdSpecial<<4 | bincSpNil) +} + +func (e *bincEncDriver) encodeBool(b bool) { if b { e.w.writen1(bincVdSpecial<<4 | bincSpTrue) } else { @@ -135,21 +91,21 @@ func (e *bincEncDriver) EncodeBool(b bool) { } } -func (e *bincEncDriver) EncodeFloat32(f float32) { +func (e *bincEncDriver) encodeFloat32(f float32) { if f == 0 { e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) return } e.w.writen1(bincVdFloat<<4 | bincFlBin32) - bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) + e.w.writeUint32(math.Float32bits(f)) } -func (e *bincEncDriver) EncodeFloat64(f float64) { +func (e *bincEncDriver) encodeFloat64(f float64) { if f == 0 { e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) return } - bigen.PutUint64(e.b[:8], math.Float64bits(f)) + bigen.PutUint64(e.b[:], math.Float64bits(f)) if bincDoPrune { i := 7 for ; i >= 0 && (e.b[i] == 0); i-- { @@ -163,7 +119,7 @@ func (e *bincEncDriver) EncodeFloat64(f float64) { } } e.w.writen1(bincVdFloat<<4 | bincFlBin64) - e.w.writeb(e.b[:8]) + e.w.writeb(e.b[:]) } func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { @@ -182,71 +138,64 @@ func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) } } -func (e *bincEncDriver) EncodeInt(v int64) { - // const nbd byte = bincVdNegInt << 4 - if v >= 0 { +func (e *bincEncDriver) encodeInt(v int64) { + const nbd byte = bincVdNegInt << 4 + switch { + case v >= 0: 
e.encUint(bincVdPosInt<<4, true, uint64(v)) - } else if v == -1 { + case v == -1: e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) - } else { + default: e.encUint(bincVdNegInt<<4, false, uint64(-v)) } } -func (e *bincEncDriver) EncodeUint(v uint64) { +func (e *bincEncDriver) encodeUint(v uint64) { e.encUint(bincVdPosInt<<4, true, v) } func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { - if v == 0 { + switch { + case v == 0: e.w.writen1(bincVdSpecial<<4 | bincSpZero) - } else if pos && v >= 1 && v <= 16 { + case pos && v >= 1 && v <= 16: e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) - } else if v <= math.MaxUint8 { + case v <= math.MaxUint8: e.w.writen2(bd|0x0, byte(v)) - } else if v <= math.MaxUint16 { + case v <= math.MaxUint16: e.w.writen1(bd | 0x01) - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: e.encIntegerPrune(bd, pos, v, 4) - } else { + default: e.encIntegerPrune(bd, pos, v, 8) } } -func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) { - bs := ext.WriteExt(rv) - if bs == nil { - e.EncodeNil() - return - } - e.encodeExtPreamble(uint8(xtag), len(bs)) - e.w.writeb(bs) -} - -func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { - e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) - e.w.writeb(re.Data) -} - func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { e.encLen(bincVdCustomExt<<4, uint64(length)) e.w.writen1(xtag) } -func (e *bincEncDriver) WriteArrayStart(length int) { +func (e *bincEncDriver) encodeArrayPreamble(length int) { e.encLen(bincVdArray<<4, uint64(length)) - e.c = containerArrayStart } -func (e *bincEncDriver) WriteMapStart(length int) { +func (e *bincEncDriver) encodeMapPreamble(length int) { e.encLen(bincVdMap<<4, uint64(length)) - e.c = containerMapStart } -func (e *bincEncDriver) EncodeSymbol(v string) { +func (e *bincEncDriver) encodeString(c charEncoding, v string) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writestr(v) + } +} + +func (e *bincEncDriver) encodeSymbol(v string) { // if WriteSymbolsNoRefs { - // e.encodeString(cUTF8, v) + // e.encodeString(c_UTF8, v) // return // } @@ -255,11 +204,12 @@ func (e *bincEncDriver) EncodeSymbol(v string) { //(bd with embedded length, and single byte for string val). 
l := len(v) - if l == 0 { - e.encBytesLen(cUTF8, 0) + switch l { + case 0: + e.encBytesLen(c_UTF8, 0) return - } else if l == 1 { - e.encBytesLen(cUTF8, 1) + case 1: + e.encBytesLen(c_UTF8, 1) e.w.writen1(v[0]) return } @@ -272,72 +222,45 @@ func (e *bincEncDriver) EncodeSymbol(v string) { e.w.writen2(bincVdSymbol<<4, byte(ui)) } else { e.w.writen1(bincVdSymbol<<4 | 0x8) - bigenHelper{e.b[:2], e.w}.writeUint16(ui) + e.w.writeUint16(ui) } } else { e.s++ - ui = e.s + ui = uint16(e.s) //ui = uint16(atomic.AddUint32(&e.s, 1)) e.m[v] = ui var lenprec uint8 - if l <= math.MaxUint8 { + switch { + case l <= math.MaxUint8: // lenprec = 0 - } else if l <= math.MaxUint16 { + case l <= math.MaxUint16: lenprec = 1 - } else if int64(l) <= math.MaxUint32 { + case int64(l) <= math.MaxUint32: lenprec = 2 - } else { + default: lenprec = 3 } if ui <= math.MaxUint8 { e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) } else { e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) - bigenHelper{e.b[:2], e.w}.writeUint16(ui) + e.w.writeUint16(ui) } - if lenprec == 0 { + switch lenprec { + case 0: e.w.writen1(byte(l)) - } else if lenprec == 1 { - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l)) - } else if lenprec == 2 { - bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l)) - } else { - bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l)) + case 1: + e.w.writeUint16(uint16(l)) + case 2: + e.w.writeUint32(uint32(l)) + default: + e.w.writeUint64(uint64(l)) } e.w.writestr(v) } } -func (e *bincEncDriver) EncodeString(c charEncoding, v string) { - if e.c == containerMapKey && c == cUTF8 && (e.h.AsSymbols == 0 || e.h.AsSymbols == 1) { - e.EncodeSymbol(v) - return - } - l := uint64(len(v)) - e.encBytesLen(c, l) - if l > 0 { - e.w.writestr(v) - } -} - -func (e *bincEncDriver) EncodeStringEnc(c charEncoding, v string) { - if e.c == containerMapKey && c == cUTF8 && (e.h.AsSymbols == 0 || e.h.AsSymbols == 1) { - e.EncodeSymbol(v) - return - } - l := uint64(len(v)) - e.encLen(bincVdString<<4, l) // e.encBytesLen(c, l) - if l > 0 { - e.w.writestr(v) - } - -} - -func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - if v == nil { - e.EncodeNil() - return - } +func (e *bincEncDriver) encodeStringBytes(c charEncoding, v []byte) { l := uint64(len(v)) e.encBytesLen(c, l) if l > 0 { @@ -345,21 +268,9 @@ func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) { } } -func (e *bincEncDriver) EncodeStringBytesRaw(v []byte) { - if v == nil { - e.EncodeNil() - return - } - l := uint64(len(v)) - e.encLen(bincVdByteArray<<4, l) // e.encBytesLen(c, l) - if l > 0 { - e.w.writeb(v) - } -} - func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { //TODO: support bincUnicodeOther (for now, just use string or bytearray) - if c == cRAW { + if c == c_RAW { e.encLen(bincVdByteArray<<4, length) } else { e.encLen(bincVdString<<4, length) @@ -375,90 +286,93 @@ func (e *bincEncDriver) encLen(bd byte, l uint64) { } func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { - if v <= math.MaxUint8 { + switch { + case v <= math.MaxUint8: e.w.writen2(bd, byte(v)) - } else if v <= math.MaxUint16 { + case v <= math.MaxUint16: e.w.writen1(bd | 0x01) - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: e.w.writen1(bd | 0x02) - bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) - } else { + e.w.writeUint32(uint32(v)) + default: e.w.writen1(bd | 0x03) - bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v)) + e.w.writeUint64(uint64(v)) } } 
//------------------------------------ -type bincDecSymbol struct { - s string - b []byte - i uint16 -} - type bincDecDriver struct { - decDriverNoopContainerReader - noBuiltInTypes - - d *Decoder - h *BincHandle - r *decReaderSwitch - br bool // bytes reader + r decReader bdRead bool + bdType valueType bd byte vd byte vs byte - _ [3]byte // padding - // linear searching on this slice is ok, - // because we typically expect < 32 symbols in each stream. - s []bincDecSymbol - - // noStreamingCodec - // decNoSeparator - - b [(8 + 1) * 8]byte // scratch + b [8]byte + m map[uint32]string // symbols (use uint32 as key, as map optimizes for it) } -func (d *bincDecDriver) readNextBd() { +func (d *bincDecDriver) initReadNext() { + if d.bdRead { + return + } d.bd = d.r.readn1() d.vd = d.bd >> 4 d.vs = d.bd & 0x0f d.bdRead = true -} - -func (d *bincDecDriver) uncacheRead() { - if d.bdRead { - d.r.unreadn1() - d.bdRead = false - } -} - -func (d *bincDecDriver) ContainerType() (vt valueType) { - if !d.bdRead { - d.readNextBd() - } - if d.vd == bincVdSpecial && d.vs == bincSpNil { - return valueTypeNil - } else if d.vd == bincVdByteArray { - return valueTypeBytes - } else if d.vd == bincVdString { - return valueTypeString - } else if d.vd == bincVdArray { - return valueTypeArray - } else if d.vd == bincVdMap { - return valueTypeMap + d.bdType = valueTypeUnset +} + +func (d *bincDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + d.bdType = valueTypeNil + case bincSpFalse, bincSpTrue: + d.bdType = valueTypeBool + case bincSpNan, bincSpNegInf, bincSpPosInf, bincSpZeroFloat: + d.bdType = valueTypeFloat + case bincSpZero: + d.bdType = valueTypeUint + case bincSpNegOne: + d.bdType = valueTypeInt + default: + decErr("currentEncodedType: Unrecognized special value 0x%x", d.vs) + } + case bincVdSmallInt: + d.bdType = valueTypeUint + case bincVdPosInt: + d.bdType = valueTypeUint + case bincVdNegInt: + d.bdType = valueTypeInt + case bincVdFloat: + d.bdType = valueTypeFloat + case bincVdString: + d.bdType = valueTypeString + case bincVdSymbol: + d.bdType = valueTypeSymbol + case bincVdByteArray: + d.bdType = valueTypeBytes + case bincVdTimestamp: + d.bdType = valueTypeTimestamp + case bincVdCustomExt: + d.bdType = valueTypeExt + case bincVdArray: + d.bdType = valueTypeArray + case bincVdMap: + d.bdType = valueTypeMap + default: + decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.vd) + } } - // else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - // } - return valueTypeUnset + return d.bdType } -func (d *bincDecDriver) TryDecodeAsNil() bool { - if !d.bdRead { - d.readNextBd() - } +func (d *bincDecDriver) tryDecodeAsNil() bool { if d.bd == bincVdSpecial<<4|bincSpNil { d.bdRead = false return true @@ -466,24 +380,24 @@ func (d *bincDecDriver) TryDecodeAsNil() bool { return false } -func (d *bincDecDriver) DecodeTime() (t time.Time) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == bincVdSpecial<<4|bincSpNil { +func (d *bincDecDriver) isBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (d *bincDecDriver) decodeBuiltin(rt uintptr, v interface{}) { + switch rt { + case timeTypId: + if d.vd != bincVdTimestamp { + decErr("Invalid d.vd. Expecting 0x%x. 
Received: 0x%x", bincVdTimestamp, d.vd) + } + tt, err := decodeTime(d.r.readn(int(d.vs))) + if err != nil { + panic(err) + } + var vt *time.Time = v.(*time.Time) + *vt = tt d.bdRead = false - return } - if d.vd != bincVdTimestamp { - d.d.errorf("cannot decode time - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) - return - } - t, err := bincDecodeTime(d.r.readx(uint(d.vs))) - if err != nil { - panic(err) - } - d.bdRead = false - return } func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { @@ -492,8 +406,7 @@ func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { } else { l := d.r.readn1() if l > 8 { - d.d.errorf("cannot read float - at most 8 bytes used to represent float - received %v bytes", l) - return + decErr("At most 8 bytes used to represent float. Received: %v bytes", l) } for i := l; i < 8; i++ { d.b[i] = 0 @@ -503,17 +416,16 @@ func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { } func (d *bincDecDriver) decFloat() (f float64) { - //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; } - if x := d.vs & 0x7; x == bincFlBin32 { - d.decFloatPre(d.vs, 4) + //if true { f = math.Float64frombits(d.r.readUint64()); break; } + switch vs := d.vs; vs & 0x7 { + case bincFlBin32: + d.decFloatPre(vs, 4) f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) - } else if x == bincFlBin64 { - d.decFloatPre(d.vs, 8) + case bincFlBin64: + d.decFloatPre(vs, 8) f = math.Float64frombits(bigen.Uint64(d.b[0:8])) - } else { - d.d.errorf("read float - only float32 and float64 are supported - %s %x-%x/%s", - msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) - return + default: + decErr("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) } return } @@ -524,148 +436,130 @@ func (d *bincDecDriver) decUint() (v uint64) { case 0: v = uint64(d.r.readn1()) case 1: - d.r.readb(d.b[6:8]) - v = uint64(bigen.Uint16(d.b[6:8])) + d.r.readb(d.b[6:]) + v = uint64(bigen.Uint16(d.b[6:])) case 2: d.b[4] = 0 - d.r.readb(d.b[5:8]) - v = uint64(bigen.Uint32(d.b[4:8])) + d.r.readb(d.b[5:]) + v = uint64(bigen.Uint32(d.b[4:])) case 3: - d.r.readb(d.b[4:8]) - v = uint64(bigen.Uint32(d.b[4:8])) + d.r.readb(d.b[4:]) + v = uint64(bigen.Uint32(d.b[4:])) case 4, 5, 6: - lim := 7 - d.vs - d.r.readb(d.b[lim:8]) - for i := uint8(0); i < lim; i++ { + lim := int(7 - d.vs) + d.r.readb(d.b[lim:]) + for i := 0; i < lim; i++ { d.b[i] = 0 } - v = uint64(bigen.Uint64(d.b[:8])) + v = uint64(bigen.Uint64(d.b[:])) case 7: - d.r.readb(d.b[:8]) - v = uint64(bigen.Uint64(d.b[:8])) + d.r.readb(d.b[:]) + v = uint64(bigen.Uint64(d.b[:])) default: - d.d.errorf("unsigned integers with greater than 64 bits of precision not supported") - return + decErr("unsigned integers with greater than 64 bits of precision not supported") } return } -func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) { - if !d.bdRead { - d.readNextBd() - } - vd, vs := d.vd, d.vs - if vd == bincVdPosInt { +func (d *bincDecDriver) decIntAny() (ui uint64, i int64, neg bool) { + switch d.vd { + case bincVdPosInt: ui = d.decUint() - } else if vd == bincVdNegInt { + i = int64(ui) + case bincVdNegInt: ui = d.decUint() + i = -(int64(ui)) neg = true - } else if vd == bincVdSmallInt { + case bincVdSmallInt: + i = int64(d.vs) + 1 ui = uint64(d.vs) + 1 - } else if vd == bincVdSpecial { - if vs == bincSpZero { + case bincVdSpecial: + switch d.vs { + case bincSpZero: //i = 0 - } else if vs == bincSpNegOne { + case bincSpNegOne: neg = true ui = 1 - } else { - d.d.errorf("integer decode fails - invalid special value 
from descriptor %x-%x/%s", - d.vd, d.vs, bincdesc(d.vd, d.vs)) - return + i = -1 + default: + decErr("numeric decode fails for special value: d.vs: 0x%x", d.vs) } - } else { - d.d.errorf("integer can only be decoded from int/uint. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) - return + default: + decErr("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) } return } -func (d *bincDecDriver) DecodeInt64() (i int64) { - ui, neg := d.decCheckInteger() - i = chkOvf.SignedIntV(ui) - if neg { - i = -i - } +func (d *bincDecDriver) decodeInt(bitsize uint8) (i int64) { + _, i, _ = d.decIntAny() + checkOverflow(0, i, bitsize) d.bdRead = false return } -func (d *bincDecDriver) DecodeUint64() (ui uint64) { - ui, neg := d.decCheckInteger() +func (d *bincDecDriver) decodeUint(bitsize uint8) (ui uint64) { + ui, i, neg := d.decIntAny() if neg { - d.d.errorf("assigning negative signed value to unsigned integer type") - return + decErr("Assigning negative signed value: %v, to unsigned type", i) } + checkOverflow(ui, 0, bitsize) d.bdRead = false return } -func (d *bincDecDriver) DecodeFloat64() (f float64) { - if !d.bdRead { - d.readNextBd() - } - vd, vs := d.vd, d.vs - if vd == bincVdSpecial { +func (d *bincDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.vd { + case bincVdSpecial: d.bdRead = false - if vs == bincSpNan { + switch d.vs { + case bincSpNan: return math.NaN() - } else if vs == bincSpPosInf { + case bincSpPosInf: return math.Inf(1) - } else if vs == bincSpZeroFloat || vs == bincSpZero { + case bincSpZeroFloat, bincSpZero: return - } else if vs == bincSpNegInf { + case bincSpNegInf: return math.Inf(-1) - } else { - d.d.errorf("float - invalid special value from descriptor %x-%x/%s", - d.vd, d.vs, bincdesc(d.vd, d.vs)) - return + default: + decErr("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) } - } else if vd == bincVdFloat { + case bincVdFloat: f = d.decFloat() - } else { - f = float64(d.DecodeInt64()) + default: + _, i, _ := d.decIntAny() + f = float64(i) } + checkOverflowFloat32(f, chkOverflow32) d.bdRead = false return } // bool can be decoded from bool only (single byte). -func (d *bincDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) { +func (d *bincDecDriver) decodeBool() (b bool) { + switch d.bd { + case (bincVdSpecial | bincSpFalse): // b = false - } else if bd == (bincVdSpecial | bincSpTrue) { + case (bincVdSpecial | bincSpTrue): b = true - } else { - d.d.errorf("bool - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) - return + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) } d.bdRead = false return } -func (d *bincDecDriver) ReadMapStart() (length int) { - if !d.bdRead { - d.readNextBd() - } +func (d *bincDecDriver) readMapLen() (length int) { if d.vd != bincVdMap { - d.d.errorf("map - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) - return + decErr("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd) } length = d.decLen() d.bdRead = false return } -func (d *bincDecDriver) ReadArrayStart() (length int) { - if !d.bdRead { - d.readNextBd() - } +func (d *bincDecDriver) readArrayLen() (length int) { if d.vd != bincVdArray { - d.d.errorf("array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) - return + decErr("Invalid d.vd for array. Expecting 0x%x. 
Got: 0x%x", bincVdArray, d.vd) } length = d.decLen() d.bdRead = false @@ -673,299 +567,189 @@ func (d *bincDecDriver) ReadArrayStart() (length int) { } func (d *bincDecDriver) decLen() int { - if d.vs > 3 { - return int(d.vs - 4) + if d.vs <= 3 { + return int(d.decUint()) } - return int(d.decLenNumber()) + return int(d.vs - 4) } -func (d *bincDecDriver) decLenNumber() (v uint64) { - if x := d.vs; x == 0 { - v = uint64(d.r.readn1()) - } else if x == 1 { - d.r.readb(d.b[6:8]) - v = uint64(bigen.Uint16(d.b[6:8])) - } else if x == 2 { - d.r.readb(d.b[4:8]) - v = uint64(bigen.Uint32(d.b[4:8])) - } else { - d.r.readb(d.b[:8]) - v = bigen.Uint64(d.b[:8]) - } - return -} - -func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) ( - bs2 []byte, s string) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == bincVdSpecial<<4|bincSpNil { - d.bdRead = false - return - } - var slen = -1 - // var ok bool +func (d *bincDecDriver) decodeString() (s string) { switch d.vd { case bincVdString, bincVdByteArray: - slen = d.decLen() - if zerocopy { - if d.br { - bs2 = d.r.readx(uint(slen)) - } else if len(bs) == 0 { - bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, d.b[:]) - } else { - bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs) - } - } else { - bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs) - } - if withString { - s = string(bs2) + if length := d.decLen(); length > 0 { + s = string(d.r.readn(length)) } case bincVdSymbol: - // zerocopy doesn't apply for symbols, - // as the values must be stored in a table for later use. - // //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, //extract symbol //if containsStringVal, read it and put in map //else look in map for string value - var symbol uint16 + var symbol uint32 vs := d.vs + //fmt.Printf(">>>> d.vs: 0b%b, & 0x8: %v, & 0x4: %v\n", d.vs, vs & 0x8, vs & 0x4) if vs&0x8 == 0 { - symbol = uint16(d.r.readn1()) + symbol = uint32(d.r.readn1()) } else { - symbol = uint16(bigen.Uint16(d.r.readx(2))) + symbol = uint32(d.r.readUint16()) } - if d.s == nil { - d.s = make([]bincDecSymbol, 0, 16) + if d.m == nil { + d.m = make(map[uint32]string, 16) } if vs&0x4 == 0 { - for i := range d.s { - j := &d.s[i] - if j.i == symbol { - bs2 = j.b - if withString { - if j.s == "" && bs2 != nil { - j.s = string(bs2) - } - s = j.s - } - break - } - } + s = d.m[symbol] } else { + var slen int switch vs & 0x3 { case 0: slen = int(d.r.readn1()) case 1: - slen = int(bigen.Uint16(d.r.readx(2))) + slen = int(d.r.readUint16()) case 2: - slen = int(bigen.Uint32(d.r.readx(4))) + slen = int(d.r.readUint32()) case 3: - slen = int(bigen.Uint64(d.r.readx(8))) + slen = int(d.r.readUint64()) } - // since using symbols, do not store any part of - // the parameter bs in the map, as it might be a shared buffer. - // bs2 = decByteSlice(d.r, slen, bs) - bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, nil) - if withString { - s = string(bs2) - } - d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2}) + s = string(d.r.readn(slen)) + d.m[symbol] = s } default: - d.d.errorf("string/bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) - return + decErr("Invalid d.vd for string. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", + bincVdString, bincVdByteArray, bincVdSymbol, d.vd) } d.bdRead = false return } -func (d *bincDecDriver) DecodeString() (s string) { - // DecodeBytes does not accommodate symbols, whose impl stores string version in map. - // Use decStringAndBytes directly. 
- // return string(d.DecodeBytes(d.b[:], true, true)) - _, s = d.decStringAndBytes(d.b[:], true, true) - return -} - -func (d *bincDecDriver) DecodeStringAsBytes() (s []byte) { - s, _ = d.decStringAndBytes(d.b[:], false, true) - return -} - -func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == bincVdSpecial<<4|bincSpNil { - d.bdRead = false - return nil - } - // check if an "array" of uint8's (see ContainerType for how to infer if an array) - if d.vd == bincVdArray { - bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) - return - } +func (d *bincDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { var clen int - if d.vd == bincVdString || d.vd == bincVdByteArray { + switch d.vd { + case bincVdString, bincVdByteArray: clen = d.decLen() - } else { - d.d.errorf("bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) - return - } - d.bdRead = false - if zerocopy { - if d.br { - return d.r.readx(uint(clen)) - } else if len(bs) == 0 { - bs = d.b[:] + default: + decErr("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x", + bincVdString, bincVdByteArray, d.vd) + } + if clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true } + d.r.readb(bs) } - return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) -} - -func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if xtag > 0xff { - d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag) - return - } - realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag = uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) - } else { - ext.ReadExt(rv, xbs) - } + d.bdRead = false return } -func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - if !d.bdRead { - d.readNextBd() - } - if d.vd == bincVdCustomExt { +func (d *bincDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + switch d.vd { + case bincVdCustomExt: l := d.decLen() xtag = d.r.readn1() if verifyTag && xtag != tag { - d.d.errorf("wrong extension tag - got %b, expecting: %v", xtag, tag) - return - } - if d.br { - xbs = d.r.readx(uint(l)) - } else { - xbs = decByteSlice(d.r, l, d.d.h.MaxInitLen, d.d.b[:]) + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) } - } else if d.vd == bincVdByteArray { - xbs = d.DecodeBytes(nil, true) - } else { - d.d.errorf("ext - expecting extensions or byte array - %s %x-%x/%s", - msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) - return + xbs = d.r.readn(l) + case bincVdByteArray: + xbs, _ = d.decodeBytes(nil) + default: + decErr("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.vd) } d.bdRead = false return } -func (d *bincDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - - n := d.d.naked() - var decodeFurther bool +func (d *bincDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { + d.initReadNext() switch d.vd { case bincVdSpecial: switch d.vs { case bincSpNil: - n.v = valueTypeNil + vt = valueTypeNil case bincSpFalse: - n.v = valueTypeBool - n.b = false + vt = valueTypeBool + v = false case bincSpTrue: - n.v = valueTypeBool - n.b = true + vt = valueTypeBool + v = true case bincSpNan: - n.v = valueTypeFloat - n.f = math.NaN() + vt = valueTypeFloat + v = math.NaN() case bincSpPosInf: - n.v = valueTypeFloat - n.f = math.Inf(1) + vt = valueTypeFloat + v = math.Inf(1) case bincSpNegInf: - n.v = valueTypeFloat - n.f = math.Inf(-1) + vt = valueTypeFloat + v = math.Inf(-1) case bincSpZeroFloat: - n.v = valueTypeFloat - n.f = float64(0) + vt = valueTypeFloat + v = float64(0) case bincSpZero: - n.v = valueTypeUint - n.u = uint64(0) // int8(0) + vt = valueTypeUint + v = int64(0) // int8(0) case bincSpNegOne: - n.v = valueTypeInt - n.i = int64(-1) // int8(-1) + vt = valueTypeInt + v = int64(-1) // int8(-1) default: - d.d.errorf("cannot infer value - unrecognized special value from descriptor %x-%x/%s", - d.vd, d.vs, bincdesc(d.vd, d.vs)) + decErr("decodeNaked: Unrecognized special value 0x%x", d.vs) } case bincVdSmallInt: - n.v = valueTypeUint - n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 + vt = valueTypeUint + v = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 case bincVdPosInt: - n.v = valueTypeUint - n.u = d.decUint() + vt = valueTypeUint + v = d.decUint() case bincVdNegInt: - n.v = valueTypeInt - n.i = -(int64(d.decUint())) + vt = valueTypeInt + v = -(int64(d.decUint())) case bincVdFloat: - n.v = valueTypeFloat - n.f = d.decFloat() + vt = valueTypeFloat + v = d.decFloat() case bincVdSymbol: - n.v = valueTypeSymbol - n.s = d.DecodeString() + vt = valueTypeSymbol + v = d.decodeString() case bincVdString: - n.v = valueTypeString - n.s = d.DecodeString() + vt = valueTypeString + v = d.decodeString() case bincVdByteArray: - decNakedReadRawBytes(d, d.d, n, d.h.RawToString) + vt = valueTypeBytes + v, _ = d.decodeBytes(nil) case bincVdTimestamp: - n.v = valueTypeTime - tt, err := bincDecodeTime(d.r.readx(uint(d.vs))) + vt = valueTypeTimestamp + tt, err := decodeTime(d.r.readn(int(d.vs))) if err != nil { panic(err) } - n.t = tt + v = tt case bincVdCustomExt: - n.v = valueTypeExt + vt = valueTypeExt l := d.decLen() - n.u = uint64(d.r.readn1()) - if d.br { - n.l = d.r.readx(uint(l)) - } else { - n.l = decByteSlice(d.r, l, d.d.h.MaxInitLen, d.d.b[:]) - } + var re RawExt + re.Tag = d.r.readn1() + re.Data = d.r.readn(l) + v = &re + vt = valueTypeExt case bincVdArray: - n.v = valueTypeArray + vt = valueTypeArray decodeFurther = true case bincVdMap: - n.v = valueTypeMap + vt = valueTypeMap decodeFurther = true default: - d.d.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) } if !decodeFurther { d.bdRead = false } - if n.v == valueTypeUint && d.h.SignedInteger { - n.v = valueTypeInt - n.i = int64(n.u) - } + return } //------------------------------------ @@ -980,224 +764,23 @@ func (d *bincDecDriver) DecodeNaked() { // extended precision and decimal IEEE 754 floats are unsupported. // - Only UTF-8 strings supported. // Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. 
-// //Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. type BincHandle struct { BasicHandle - binaryEncodingType - noElemSeparators - - // AsSymbols defines what should be encoded as symbols. - // - // Encoding as symbols can reduce the encoded size significantly. - // - // However, during decoding, each string to be encoded as a symbol must - // be checked to see if it has been seen before. Consequently, encoding time - // will increase if using symbols, because string comparisons has a clear cost. - // - // Values: - // - 0: default: library uses best judgement - // - 1: use symbols - // - 2: do not use symbols - AsSymbols uint8 - - // AsSymbols: may later on introduce more options ... - // - m: map keys - // - s: struct fields - // - n: none - // - a: all: same as m, s, ... - - // _ [1]uint64 // padding -} - -// Name returns the name of the handle: binc -func (h *BincHandle) Name() string { return "binc" } - -// SetBytesExt sets an extension -func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}}) -} - -func (h *BincHandle) newEncDriver(e *Encoder) encDriver { - return &bincEncDriver{e: e, h: h, w: e.w} -} - -func (h *BincHandle) newDecDriver(d *Decoder) decDriver { - return &bincDecDriver{d: d, h: h, r: d.r, br: d.bytes} } -func (e *bincEncDriver) reset() { - e.w = e.e.w - e.s = 0 - e.c = 0 - e.m = nil +func (h *BincHandle) newEncDriver(w encWriter) encDriver { + return &bincEncDriver{w: w} } -func (d *bincDecDriver) reset() { - d.r, d.br = d.d.r, d.d.bytes - d.s = nil - d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0 +func (h *BincHandle) newDecDriver(r decReader) decDriver { + return &bincDecDriver{r: r} } -// var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} - -// EncodeTime encodes a time.Time as a []byte, including -// information on the instant in time and UTC offset. -// -// Format Description -// -// A timestamp is composed of 3 components: -// -// - secs: signed integer representing seconds since unix epoch -// - nsces: unsigned integer representing fractional seconds as a -// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 -// - tz: signed integer representing timezone offset in minutes east of UTC, -// and a dst (daylight savings time) flag -// -// When encoding a timestamp, the first byte is the descriptor, which -// defines which components are encoded and how many bytes are used to -// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it -// is not encoded in the byte array explicitly*. -// -// Descriptor 8 bits are of the form `A B C DDD EE`: -// A: Is secs component encoded? 1 = true -// B: Is nsecs component encoded? 1 = true -// C: Is tz component encoded? 1 = true -// DDD: Number of extra bytes for secs (range 0-7). -// If A = 1, secs encoded in DDD+1 bytes. -// If A = 0, secs is not encoded, and is assumed to be 0. -// If A = 1, then we need at least 1 byte to encode secs. -// DDD says the number of extra bytes beyond that 1. -// E.g. if DDD=0, then secs is represented in 1 byte. -// if DDD=2, then secs is represented in 3 bytes. -// EE: Number of extra bytes for nsecs (range 0-3). 
-// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) -// -// Following the descriptor bytes, subsequent bytes are: -// -// secs component encoded in `DDD + 1` bytes (if A == 1) -// nsecs component encoded in `EE + 1` bytes (if B == 1) -// tz component encoded in 2 bytes (if C == 1) -// -// secs and nsecs components are integers encoded in a BigEndian -// 2-complement encoding format. -// -// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to -// Least significant bit 0 are described below: -// -// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). -// Bit 15 = have\_dst: set to 1 if we set the dst flag. -// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not. -// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format. -// -func bincEncodeTime(t time.Time) []byte { - //t := rv.Interface().(time.Time) - tsecs, tnsecs := t.Unix(), t.Nanosecond() - var ( - bd byte - btmp [8]byte - bs [16]byte - i int = 1 - ) - l := t.Location() - if l == time.UTC { - l = nil - } - if tsecs != 0 { - bd = bd | 0x80 - bigen.PutUint64(btmp[:], uint64(tsecs)) - f := pruneSignExt(btmp[:], tsecs >= 0) - bd = bd | (byte(7-f) << 2) - copy(bs[i:], btmp[f:]) - i = i + (8 - f) - } - if tnsecs != 0 { - bd = bd | 0x40 - bigen.PutUint32(btmp[:4], uint32(tnsecs)) - f := pruneSignExt(btmp[:4], true) - bd = bd | byte(3-f) - copy(bs[i:], btmp[f:4]) - i = i + (4 - f) - } - if l != nil { - bd = bd | 0x20 - // Note that Go Libs do not give access to dst flag. - _, zoneOffset := t.Zone() - //zoneName, zoneOffset := t.Zone() - zoneOffset /= 60 - z := uint16(zoneOffset) - bigen.PutUint16(btmp[:2], z) - // clear dst flags - bs[i] = btmp[0] & 0x3f - bs[i+1] = btmp[1] - i = i + 2 - } - bs[0] = bd - return bs[0:i] +func (_ *BincHandle) writeExt() bool { + return true } -// bincDecodeTime decodes a []byte into a time.Time. -func bincDecodeTime(bs []byte) (tt time.Time, err error) { - bd := bs[0] - var ( - tsec int64 - tnsec uint32 - tz uint16 - i byte = 1 - i2 byte - n byte - ) - if bd&(1<<7) != 0 { - var btmp [8]byte - n = ((bd >> 2) & 0x7) + 1 - i2 = i + n - copy(btmp[8-n:], bs[i:i2]) - //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) - if bs[i]&(1<<7) != 0 { - copy(btmp[0:8-n], bsAll0xff) - //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } - } - i = i2 - tsec = int64(bigen.Uint64(btmp[:])) - } - if bd&(1<<6) != 0 { - var btmp [4]byte - n = (bd & 0x3) + 1 - i2 = i + n - copy(btmp[4-n:], bs[i:i2]) - i = i2 - tnsec = bigen.Uint32(btmp[:]) - } - if bd&(1<<5) == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - return - } - // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. - // However, we need name here, so it can be shown when time is printf.d. - // Zone name is in form: UTC-08:00. - // Note that Go Libs do not give access to dst flag, so we ignore dst bits - - i2 = i + 2 - tz = bigen.Uint16(bs[i:i2]) - // i = i2 - // sign extend sign bit into top 2 MSB (which were dst bits): - if tz&(1<<13) == 0 { // positive - tz = tz & 0x3fff //clear 2 MSBs: dst bits - } else { // negative - tz = tz | 0xc000 //set 2 MSBs: dst bits - } - tzint := int16(tz) - if tzint == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - } else { - // For Go Time, do not use a descriptive timezone. - // It's unnecessary, and makes it harder to do a reflect.DeepEqual. - // The Offset already tells what the offset should be, if not on UTC and unknown zone name. 
- // var zoneName = timeLocUTCName(tzint) - tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) - } - return +func (h *BincHandle) getBasicHandle() *BasicHandle { + return &h.BasicHandle } - -var _ decDriver = (*bincDecDriver)(nil) -var _ encDriver = (*bincEncDriver)(nil) diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/build.sh b/src/vendor/github.com/hashicorp/go-msgpack/codec/build.sh deleted file mode 100644 index dd79c13f..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/build.sh +++ /dev/null @@ -1,267 +0,0 @@ -#!/bin/bash - -# Run all the different permutations of all the tests and other things -# This helps ensure that nothing gets broken. - -_tests() { - local gover=$( go version | cut -f 3 -d ' ' ) - # note that codecgen requires fastpath, so you cannot do "codecgen notfastpath" - local a=( "" "safe" "notfastpath" "notfastpath safe" "codecgen" "codecgen safe" ) - for i in "${a[@]}" - do - echo ">>>> TAGS: $i" - local i2=${i:-default} - case $gover in - go1.[0-6]*) go vet -printfuncs "errorf" "$@" && - go test ${zargs[*]} -vet off -tags "$i" "$@" ;; - *) go vet -printfuncs "errorf" "$@" && - go test ${zargs[*]} -vet off -tags "alltests $i" -run "Suite" -coverprofile "${i2// /-}.cov.out" "$@" ;; - esac - if [[ "$?" != 0 ]]; then return 1; fi - done - echo "++++++++ TEST SUITES ALL PASSED ++++++++" -} - - -# is a generation needed? -_ng() { - local a="$1" - if [[ ! -e "$a" ]]; then echo 1; return; fi - for i in `ls -1 *.go.tmpl gen.go values_test.go` - do - if [[ "$a" -ot "$i" ]]; then echo 1; return; fi - done -} - -_prependbt() { - cat > ${2} <> ${2} - rm -f ${1} -} - -# _build generates fast-path.go and gen-helper.go. -_build() { - if ! [[ "${zforce}" || $(_ng "fast-path.generated.go") || $(_ng "gen-helper.generated.go") || $(_ng "gen.generated.go") ]]; then return 0; fi - - if [ "${zbak}" ]; then - _zts=`date '+%m%d%Y_%H%M%S'` - _gg=".generated.go" - [ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak - [ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak - [ -e "gen${_gg}" ] && mv gen${_gg} gen${_gg}__${_zts}.bak - fi - rm -f gen-helper.generated.go fast-path.generated.go gen.generated.go \ - *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go - - cat > gen.generated.go <> gen.generated.go < gen-dec-map.go.tmpl - cat >> gen.generated.go <> gen.generated.go < gen-dec-array.go.tmpl - cat >> gen.generated.go <> gen.generated.go < gen-enc-chan.go.tmpl - cat >> gen.generated.go < gen-from-tmpl.codec.generated.go < gen-from-tmpl.generated.go < " + fnameOut + " ______") -fin, err := os.Open(fnameIn) -if err != nil { panic(err) } -defer fin.Close() -fout, err := os.Create(fnameOut) -if err != nil { panic(err) } -defer fout.Close() -err = codec.GenInternalGoFile(fin, fout) -if err != nil { panic(err) } -} - -func main() { -run("fast-path.go.tmpl", "fast-path.generated.go") -run("gen-helper.go.tmpl", "gen-helper.generated.go") -run("mammoth-test.go.tmpl", "mammoth_generated_test.go") -run("mammoth2-test.go.tmpl", "mammoth2_generated_test.go") -} -EOF - - sed -e 's+// __DO_NOT_REMOVE__NEEDED_FOR_REPLACING__IMPORT_PATH__FOR_CODEC_BENCH__+import . 
"github.com/ugorji/go/codec"+' \ - shared_test.go > bench/shared_test.go - - # explicitly return 0 if this passes, else return 1 - go run -tags "notfastpath safe codecgen.exec" gen-from-tmpl.generated.go && - rm -f gen-from-tmpl.*generated.go && - return 0 - return 1 -} - -_codegenerators() { - local c5="_generated_test.go" - local c7="$PWD/codecgen" - local c8="$c7/__codecgen" - local c9="codecgen-scratch.go" - - if ! [[ $zforce || $(_ng "values_codecgen${c5}") ]]; then return 0; fi - - # Note: ensure you run the codecgen for this codebase/directory i.e. ./codecgen/codecgen - true && - echo "codecgen ... " && - if [[ $zforce || ! -f "$c8" || "$c7/gen.go" -nt "$c8" ]]; then - echo "rebuilding codecgen ... " && ( cd codecgen && go build -o $c8 ${zargs[*]} . ) - fi && - $c8 -rt codecgen -t 'codecgen generated' -o values_codecgen${c5} -d 19780 $zfin $zfin2 && - cp mammoth2_generated_test.go $c9 && - $c8 -t '!notfastpath' -o mammoth2_codecgen${c5} -d 19781 mammoth2_generated_test.go && - rm -f $c9 && - echo "generators done!" -} - -_prebuild() { - echo "prebuild: zforce: $zforce" - local d="$PWD" - zfin="test_values.generated.go" - zfin2="test_values_flex.generated.go" - zpkg="github.com/ugorji/go/codec" - # zpkg=${d##*/src/} - # zgobase=${d%%/src/*} - # rm -f *_generated_test.go - rm -f codecgen-*.go && - _build && - cp $d/values_test.go $d/$zfin && - cp $d/values_flex_test.go $d/$zfin2 && - _codegenerators && - if [[ "$(type -t _codegenerators_external )" = "function" ]]; then _codegenerators_external ; fi && - if [[ $zforce ]]; then go install ${zargs[*]} .; fi && - echo "prebuild done successfully" - rm -f $d/$zfin $d/$zfin2 - unset zfin zfin2 zpkg -} - -_make() { - zforce=1 - (cd codecgen && go install ${zargs[*]} .) && _prebuild && go install ${zargs[*]} . - unset zforce -} - -_clean() { - rm -f gen-from-tmpl.*generated.go \ - codecgen-*.go \ - test_values.generated.go test_values_flex.generated.go -} - -_release() { - local reply - read -p "Pre-release validation takes a few minutes and MUST be run from within GOPATH/src. Confirm y/n? " -n 1 -r reply - echo - if [[ ! $reply =~ ^[Yy]$ ]]; then return 1; fi - - # expects GOROOT, GOROOT_BOOTSTRAP to have been set. - if [[ -z "${GOROOT// }" || -z "${GOROOT_BOOTSTRAP// }" ]]; then return 1; fi - # (cd $GOROOT && git checkout -f master && git pull && git reset --hard) - (cd $GOROOT && git pull) - local f=`pwd`/make.release.out - cat > $f <>$f - if [[ "$i" != "master" ]]; then i="release-branch.go$i"; fi - (false || - (echo "===== BUILDING GO SDK for branch: $i ... =====" && - cd $GOROOT && - git checkout -f $i && git reset --hard && git clean -f . && - cd src && ./make.bash >>$f 2>&1 && sleep 1 ) ) && - echo "===== GO SDK BUILD DONE =====" && - _prebuild && - echo "===== PREBUILD DONE with exit: $? =====" && - _tests "$@" - if [[ "$?" 
!= 0 ]]; then return 1; fi - done - unset zforce - echo "++++++++ RELEASE TEST SUITES ALL PASSED ++++++++" -} - -_usage() { - cat < [tests, make, prebuild (force) (external), inlining diagnostics, mid-stack inlining, race detector] - -v -> verbose -EOF - if [[ "$(type -t _usage_run)" = "function" ]]; then _usage_run ; fi -} - -_main() { - if [[ -z "$1" ]]; then _usage; return 1; fi - local x - unset zforce - zargs=() - zbenchflags="" - OPTIND=1 - while getopts ":ctmnrgpfvlzdb:" flag - do - case "x$flag" in - 'xf') zforce=1 ;; - 'xv') zverbose=1 ;; - 'xl') zargs+=("-gcflags"); zargs+=("-l=4") ;; - 'xn') zargs+=("-gcflags"); zargs+=("-m=2") ;; - 'xd') zargs+=("-race") ;; - 'xb') x='b'; zbenchflags=${OPTARG} ;; - x\?) _usage; return 1 ;; - *) x=$flag ;; - esac - done - shift $((OPTIND-1)) - # echo ">>>> _main: extra args: $@" - case "x$x" in - 'xt') _tests "$@" ;; - 'xm') _make "$@" ;; - 'xr') _release "$@" ;; - 'xg') _go ;; - 'xp') _prebuild "$@" ;; - 'xc') _clean "$@" ;; - 'xz') _analyze "$@" ;; - 'xb') _bench "$@" ;; - esac - unset zforce zargs zbenchflags -} - -[ "." = `dirname $0` ] && _main "$@" - diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/cbor.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/cbor.go deleted file mode 100644 index 7833f9d6..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/cbor.go +++ /dev/null @@ -1,767 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "math" - "reflect" - "time" -) - -const ( - cborMajorUint byte = iota - cborMajorNegInt - cborMajorBytes - cborMajorText - cborMajorArray - cborMajorMap - cborMajorTag - cborMajorOther -) - -const ( - cborBdFalse byte = 0xf4 + iota - cborBdTrue - cborBdNil - cborBdUndefined - cborBdExt - cborBdFloat16 - cborBdFloat32 - cborBdFloat64 -) - -const ( - cborBdIndefiniteBytes byte = 0x5f - cborBdIndefiniteString byte = 0x7f - cborBdIndefiniteArray byte = 0x9f - cborBdIndefiniteMap byte = 0xbf - cborBdBreak byte = 0xff -) - -// These define some in-stream descriptors for -// manual encoding e.g. 
when doing explicit indefinite-length -const ( - CborStreamBytes byte = 0x5f - CborStreamString byte = 0x7f - CborStreamArray byte = 0x9f - CborStreamMap byte = 0xbf - CborStreamBreak byte = 0xff -) - -const ( - cborBaseUint byte = 0x00 - cborBaseNegInt byte = 0x20 - cborBaseBytes byte = 0x40 - cborBaseString byte = 0x60 - cborBaseArray byte = 0x80 - cborBaseMap byte = 0xa0 - cborBaseTag byte = 0xc0 - cborBaseSimple byte = 0xe0 -) - -func cbordesc(bd byte) string { - switch bd { - case cborBdNil: - return "nil" - case cborBdFalse: - return "false" - case cborBdTrue: - return "true" - case cborBdFloat16, cborBdFloat32, cborBdFloat64: - return "float" - case cborBdIndefiniteBytes: - return "bytes*" - case cborBdIndefiniteString: - return "string*" - case cborBdIndefiniteArray: - return "array*" - case cborBdIndefiniteMap: - return "map*" - default: - switch { - case bd >= cborBaseUint && bd < cborBaseNegInt: - return "(u)int" - case bd >= cborBaseNegInt && bd < cborBaseBytes: - return "int" - case bd >= cborBaseBytes && bd < cborBaseString: - return "bytes" - case bd >= cborBaseString && bd < cborBaseArray: - return "string" - case bd >= cborBaseArray && bd < cborBaseMap: - return "array" - case bd >= cborBaseMap && bd < cborBaseTag: - return "map" - case bd >= cborBaseTag && bd < cborBaseSimple: - return "ext" - default: - return "unknown" - } - } -} - -// ------------------- - -type cborEncDriver struct { - noBuiltInTypes - encDriverNoopContainerWriter - e *Encoder - w *encWriterSwitch - h *CborHandle - x [8]byte - // _ [3]uint64 // padding -} - -func (e *cborEncDriver) EncodeNil() { - e.w.writen1(cborBdNil) -} - -func (e *cborEncDriver) EncodeBool(b bool) { - if b { - e.w.writen1(cborBdTrue) - } else { - e.w.writen1(cborBdFalse) - } -} - -func (e *cborEncDriver) EncodeFloat32(f float32) { - e.w.writen1(cborBdFloat32) - bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) -} - -func (e *cborEncDriver) EncodeFloat64(f float64) { - e.w.writen1(cborBdFloat64) - bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) -} - -func (e *cborEncDriver) encUint(v uint64, bd byte) { - if v <= 0x17 { - e.w.writen1(byte(v) + bd) - } else if v <= math.MaxUint8 { - e.w.writen2(bd+0x18, uint8(v)) - } else if v <= math.MaxUint16 { - e.w.writen1(bd + 0x19) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { - e.w.writen1(bd + 0x1a) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v)) - } else { // if v <= math.MaxUint64 { - e.w.writen1(bd + 0x1b) - bigenHelper{e.x[:8], e.w}.writeUint64(v) - } -} - -func (e *cborEncDriver) EncodeInt(v int64) { - if v < 0 { - e.encUint(uint64(-1-v), cborBaseNegInt) - } else { - e.encUint(uint64(v), cborBaseUint) - } -} - -func (e *cborEncDriver) EncodeUint(v uint64) { - e.encUint(v, cborBaseUint) -} - -func (e *cborEncDriver) encLen(bd byte, length int) { - e.encUint(uint64(length), bd) -} - -func (e *cborEncDriver) EncodeTime(t time.Time) { - if t.IsZero() { - e.EncodeNil() - } else if e.h.TimeRFC3339 { - e.encUint(0, cborBaseTag) - e.EncodeStringEnc(cUTF8, t.Format(time.RFC3339Nano)) - } else { - e.encUint(1, cborBaseTag) - t = t.UTC().Round(time.Microsecond) - sec, nsec := t.Unix(), uint64(t.Nanosecond()) - if nsec == 0 { - e.EncodeInt(sec) - } else { - e.EncodeFloat64(float64(sec) + float64(nsec)/1e9) - } - } -} - -func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { - e.encUint(uint64(xtag), cborBaseTag) - if v := ext.ConvertExt(rv); v == nil { - e.EncodeNil() - } else { - en.encode(v) - } -} - 
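The encUint logic above follows the standard CBOR head layout: values up to 0x17 are packed directly into the initial byte, and larger values are prefixed with bd+0x18, bd+0x19, bd+0x1a or bd+0x1b followed by 1, 2, 4 or 8 big-endian argument bytes. A standalone, illustrative sketch of the resulting encoded sizes (not a function from this package):

func cborUintEncodedSize(v uint64) int {
	switch {
	case v <= 0x17:
		return 1 // value fits in the initial byte
	case v <= 0xff:
		return 2 // bd+0x18 prefix, then 1 byte
	case v <= 0xffff:
		return 3 // bd+0x19 prefix, then 2 bytes
	case v <= 0xffffffff:
		return 5 // bd+0x1a prefix, then 4 bytes
	default:
		return 9 // bd+0x1b prefix, then 8 bytes
	}
}

This is why small integers and small map/array lengths cost a single byte on the wire.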
-func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { - e.encUint(uint64(re.Tag), cborBaseTag) - // only encodes re.Value (never re.Data) - // if false && re.Data != nil { - // en.encode(re.Data) - // } else if re.Value != nil { - if re.Value != nil { - en.encode(re.Value) - } else { - e.EncodeNil() - } -} - -func (e *cborEncDriver) WriteArrayStart(length int) { - if e.h.IndefiniteLength { - e.w.writen1(cborBdIndefiniteArray) - } else { - e.encLen(cborBaseArray, length) - } -} - -func (e *cborEncDriver) WriteMapStart(length int) { - if e.h.IndefiniteLength { - e.w.writen1(cborBdIndefiniteMap) - } else { - e.encLen(cborBaseMap, length) - } -} - -func (e *cborEncDriver) WriteMapEnd() { - if e.h.IndefiniteLength { - e.w.writen1(cborBdBreak) - } -} - -func (e *cborEncDriver) WriteArrayEnd() { - if e.h.IndefiniteLength { - e.w.writen1(cborBdBreak) - } -} - -func (e *cborEncDriver) EncodeString(c charEncoding, v string) { - e.encStringBytesS(cborBaseString, v) -} - -func (e *cborEncDriver) EncodeStringEnc(c charEncoding, v string) { - e.encStringBytesS(cborBaseString, v) -} - -func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - if v == nil { - e.EncodeNil() - } else if c == cRAW { - e.encStringBytesS(cborBaseBytes, stringView(v)) - } else { - e.encStringBytesS(cborBaseString, stringView(v)) - } -} - -func (e *cborEncDriver) EncodeStringBytesRaw(v []byte) { - if v == nil { - e.EncodeNil() - } else { - e.encStringBytesS(cborBaseBytes, stringView(v)) - } -} - -func (e *cborEncDriver) encStringBytesS(bb byte, v string) { - if e.h.IndefiniteLength { - if bb == cborBaseBytes { - e.w.writen1(cborBdIndefiniteBytes) - } else { - e.w.writen1(cborBdIndefiniteString) - } - var vlen uint = uint(len(v)) - blen := vlen / 4 - if blen == 0 { - blen = 64 - } else if blen > 1024 { - blen = 1024 - } - for i := uint(0); i < vlen; { - var v2 string - i2 := i + blen - if i2 < vlen { - v2 = v[i:i2] - } else { - v2 = v[i:] - } - e.encLen(bb, len(v2)) - e.w.writestr(v2) - i = i2 - } - e.w.writen1(cborBdBreak) - } else { - e.encLen(bb, len(v)) - e.w.writestr(v) - } -} - -// ---------------------- - -type cborDecDriver struct { - d *Decoder - h *CborHandle - r *decReaderSwitch - br bool // bytes reader - bdRead bool - bd byte - noBuiltInTypes - // decNoSeparator - decDriverNoopContainerReader - // _ [3]uint64 // padding -} - -func (d *cborDecDriver) readNextBd() { - d.bd = d.r.readn1() - d.bdRead = true -} - -func (d *cborDecDriver) uncacheRead() { - if d.bdRead { - d.r.unreadn1() - d.bdRead = false - } -} - -func (d *cborDecDriver) ContainerType() (vt valueType) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == cborBdNil { - return valueTypeNil - } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) { - return valueTypeBytes - } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) { - return valueTypeString - } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) { - return valueTypeArray - } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) { - return valueTypeMap - } - // else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - // } - return valueTypeUnset -} - -func (d *cborDecDriver) TryDecodeAsNil() bool { - if !d.bdRead { - d.readNextBd() - } - // treat Nil and Undefined as nil values - if d.bd == cborBdNil || d.bd == cborBdUndefined { - d.bdRead = false - return true - } - return false -} - -func (d *cborDecDriver) 
CheckBreak() bool { - if !d.bdRead { - d.readNextBd() - } - if d.bd == cborBdBreak { - d.bdRead = false - return true - } - return false -} - -func (d *cborDecDriver) decUint() (ui uint64) { - v := d.bd & 0x1f - if v <= 0x17 { - ui = uint64(v) - } else { - if v == 0x18 { - ui = uint64(d.r.readn1()) - } else if v == 0x19 { - ui = uint64(bigen.Uint16(d.r.readx(2))) - } else if v == 0x1a { - ui = uint64(bigen.Uint32(d.r.readx(4))) - } else if v == 0x1b { - ui = uint64(bigen.Uint64(d.r.readx(8))) - } else { - d.d.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd)) - return - } - } - return -} - -func (d *cborDecDriver) decCheckInteger() (neg bool) { - if !d.bdRead { - d.readNextBd() - } - major := d.bd >> 5 - if major == cborMajorUint { - } else if major == cborMajorNegInt { - neg = true - } else { - d.d.errorf("not an integer - invalid major %v from descriptor %x/%s", - major, d.bd, cbordesc(d.bd)) - return - } - return -} - -func (d *cborDecDriver) DecodeInt64() (i int64) { - neg := d.decCheckInteger() - ui := d.decUint() - // check if this number can be converted to an int without overflow - if neg { - i = -(chkOvf.SignedIntV(ui + 1)) - } else { - i = chkOvf.SignedIntV(ui) - } - d.bdRead = false - return -} - -func (d *cborDecDriver) DecodeUint64() (ui uint64) { - if d.decCheckInteger() { - d.d.errorf("assigning negative signed value to unsigned type") - return - } - ui = d.decUint() - d.bdRead = false - return -} - -func (d *cborDecDriver) DecodeFloat64() (f float64) { - if !d.bdRead { - d.readNextBd() - } - if bd := d.bd; bd == cborBdFloat16 { - f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2))))) - } else if bd == cborBdFloat32 { - f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - } else if bd == cborBdFloat64 { - f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - } else if bd >= cborBaseUint && bd < cborBaseBytes { - f = float64(d.DecodeInt64()) - } else { - d.d.errorf("float only valid from float16/32/64 - invalid descriptor %x/%s", bd, cbordesc(bd)) - return - } - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). -func (d *cborDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if bd := d.bd; bd == cborBdTrue { - b = true - } else if bd == cborBdFalse { - } else { - d.d.errorf("not bool - %s %x/%s", msgBadDesc, d.bd, cbordesc(d.bd)) - return - } - d.bdRead = false - return -} - -func (d *cborDecDriver) ReadMapStart() (length int) { - if !d.bdRead { - d.readNextBd() - } - d.bdRead = false - if d.bd == cborBdIndefiniteMap { - return -1 - } - return d.decLen() -} - -func (d *cborDecDriver) ReadArrayStart() (length int) { - if !d.bdRead { - d.readNextBd() - } - d.bdRead = false - if d.bd == cborBdIndefiniteArray { - return -1 - } - return d.decLen() -} - -func (d *cborDecDriver) decLen() int { - return int(d.decUint()) -} - -func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte { - d.bdRead = false - for { - if d.CheckBreak() { - break - } - if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText { - d.d.errorf("expect bytes/string major type in indefinite string/bytes;"+ - " got major %v from descriptor %x/%x", major, d.bd, cbordesc(d.bd)) - return nil - } - n := d.decLen() - oldLen := len(bs) - newLen := oldLen + n - if newLen > cap(bs) { - bs2 := make([]byte, newLen, 2*cap(bs)+n) - copy(bs2, bs) - bs = bs2 - } else { - bs = bs[:newLen] - } - d.r.readb(bs[oldLen:newLen]) - // bs = append(bs, d.r.readn()...) 
- d.bdRead = false - } - d.bdRead = false - return bs -} - -func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == cborBdNil || d.bd == cborBdUndefined { - d.bdRead = false - return nil - } - if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { - d.bdRead = false - if bs == nil { - if zerocopy { - return d.decAppendIndefiniteBytes(d.d.b[:0]) - } - return d.decAppendIndefiniteBytes(zeroByteSlice) - } - return d.decAppendIndefiniteBytes(bs[:0]) - } - // check if an "array" of uint8's (see ContainerType for how to infer if an array) - if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) { - bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) - return - } - clen := d.decLen() - d.bdRead = false - if zerocopy { - if d.br { - return d.r.readx(uint(clen)) - } else if len(bs) == 0 { - bs = d.d.b[:] - } - } - return decByteSlice(d.r, clen, d.h.MaxInitLen, bs) -} - -func (d *cborDecDriver) DecodeString() (s string) { - return string(d.DecodeBytes(d.d.b[:], true)) -} - -func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) { - return d.DecodeBytes(d.d.b[:], true) -} - -func (d *cborDecDriver) DecodeTime() (t time.Time) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == cborBdNil || d.bd == cborBdUndefined { - d.bdRead = false - return - } - xtag := d.decUint() - d.bdRead = false - return d.decodeTime(xtag) -} - -func (d *cborDecDriver) decodeTime(xtag uint64) (t time.Time) { - if !d.bdRead { - d.readNextBd() - } - switch xtag { - case 0: - var err error - if t, err = time.Parse(time.RFC3339, stringView(d.DecodeStringAsBytes())); err != nil { - d.d.errorv(err) - } - case 1: - // decode an int64 or a float, and infer time.Time from there. - // for floats, round to microseconds, as that is what is guaranteed to fit well. - switch { - case d.bd == cborBdFloat16, d.bd == cborBdFloat32: - f1, f2 := math.Modf(d.DecodeFloat64()) - t = time.Unix(int64(f1), int64(f2*1e9)) - case d.bd == cborBdFloat64: - f1, f2 := math.Modf(d.DecodeFloat64()) - t = time.Unix(int64(f1), int64(f2*1e9)) - case d.bd >= cborBaseUint && d.bd < cborBaseNegInt, - d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: - t = time.Unix(d.DecodeInt64(), 0) - default: - d.d.errorf("time.Time can only be decoded from a number (or RFC3339 string)") - } - default: - d.d.errorf("invalid tag for time.Time - expecting 0 or 1, got 0x%x", xtag) - } - t = t.UTC().Round(time.Microsecond) - return -} - -func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if !d.bdRead { - d.readNextBd() - } - u := d.decUint() - d.bdRead = false - realxtag = u - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - d.d.decode(&re.Value) - } else if xtag != realxtag { - d.d.errorf("Wrong extension tag. Got %b. 
Expecting: %v", realxtag, xtag) - return - } else { - var v interface{} - d.d.decode(&v) - ext.UpdateExt(rv, v) - } - d.bdRead = false - return -} - -func (d *cborDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - - n := d.d.naked() - var decodeFurther bool - - switch d.bd { - case cborBdNil: - n.v = valueTypeNil - case cborBdFalse: - n.v = valueTypeBool - n.b = false - case cborBdTrue: - n.v = valueTypeBool - n.b = true - case cborBdFloat16, cborBdFloat32, cborBdFloat64: - n.v = valueTypeFloat - n.f = d.DecodeFloat64() - case cborBdIndefiniteBytes: - decNakedReadRawBytes(d, d.d, n, d.h.RawToString) - case cborBdIndefiniteString: - n.v = valueTypeString - n.s = d.DecodeString() - case cborBdIndefiniteArray: - n.v = valueTypeArray - decodeFurther = true - case cborBdIndefiniteMap: - n.v = valueTypeMap - decodeFurther = true - default: - switch { - case d.bd >= cborBaseUint && d.bd < cborBaseNegInt: - if d.h.SignedInteger { - n.v = valueTypeInt - n.i = d.DecodeInt64() - } else { - n.v = valueTypeUint - n.u = d.DecodeUint64() - } - case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: - n.v = valueTypeInt - n.i = d.DecodeInt64() - case d.bd >= cborBaseBytes && d.bd < cborBaseString: - decNakedReadRawBytes(d, d.d, n, d.h.RawToString) - case d.bd >= cborBaseString && d.bd < cborBaseArray: - n.v = valueTypeString - n.s = d.DecodeString() - case d.bd >= cborBaseArray && d.bd < cborBaseMap: - n.v = valueTypeArray - decodeFurther = true - case d.bd >= cborBaseMap && d.bd < cborBaseTag: - n.v = valueTypeMap - decodeFurther = true - case d.bd >= cborBaseTag && d.bd < cborBaseSimple: - n.v = valueTypeExt - n.u = d.decUint() - n.l = nil - if n.u == 0 || n.u == 1 { - d.bdRead = false - n.v = valueTypeTime - n.t = d.decodeTime(n.u) - } - // d.bdRead = false - // d.d.decode(&re.Value) // handled by decode itself. - // decodeFurther = true - default: - d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) - return - } - } - - if !decodeFurther { - d.bdRead = false - } -} - -// ------------------------- - -// CborHandle is a Handle for the CBOR encoding format, -// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io . -// -// CBOR is comprehensively supported, including support for: -// - indefinite-length arrays/maps/bytes/strings -// - (extension) tags in range 0..0xffff (0 .. 65535) -// - half, single and double-precision floats -// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers) -// - nil, true, false, ... -// - arrays and maps, bytes and text strings -// -// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box. -// Users can implement them as needed (using SetExt), including spec-documented ones: -// - timestamp, BigNum, BigFloat, Decimals, -// - Encoded Text (e.g. URL, regexp, base64, MIME Message), etc. -type CborHandle struct { - binaryEncodingType - noElemSeparators - BasicHandle - - // IndefiniteLength=true, means that we encode using indefinitelength - IndefiniteLength bool - - // TimeRFC3339 says to encode time.Time using RFC3339 format. - // If unset, we encode time.Time using seconds past epoch. 
- TimeRFC3339 bool - - // _ [1]uint64 // padding -} - -// Name returns the name of the handle: cbor -func (h *CborHandle) Name() string { return "cbor" } - -// SetInterfaceExt sets an extension -func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { - return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext}) -} - -func (h *CborHandle) newEncDriver(e *Encoder) encDriver { - return &cborEncDriver{e: e, w: e.w, h: h} -} - -func (h *CborHandle) newDecDriver(d *Decoder) decDriver { - return &cborDecDriver{d: d, h: h, r: d.r, br: d.bytes} -} - -func (e *cborEncDriver) reset() { - e.w = e.e.w -} - -func (d *cborDecDriver) reset() { - d.r, d.br = d.d.r, d.d.bytes - d.bd, d.bdRead = 0, false -} - -var _ decDriver = (*cborDecDriver)(nil) -var _ encDriver = (*cborEncDriver)(nil) diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/codecgen.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/codecgen.go deleted file mode 100644 index cc5ecec6..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/codecgen.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build codecgen generated - -package codec - -// this file is here, to set the codecgen variable to true -// when the build tag codecgen is set. -// -// this allows us do specific things e.g. skip missing fields tests, -// when running in codecgen mode. - -func init() { - codecgen = true -} diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/decode.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/decode.go index 27d36201..851b54ac 100644 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/decode.go +++ b/src/vendor/github.com/hashicorp/go-msgpack/codec/decode.go @@ -1,2439 +1,578 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. package codec import ( - "encoding" - "errors" - "fmt" "io" "reflect" - "runtime" - "strconv" - "time" + // "runtime/debug" ) // Some tagging information for error messages. const ( - msgBadDesc = "unrecognized descriptor byte" - // msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" + msgTagDec = "codec.decoder" + msgBadDesc = "Unrecognized descriptor byte" + msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" ) -const ( - decDefMaxDepth = 1024 // maximum depth - decDefSliceCap = 8 - decDefChanCap = 64 // should be large, as cap cannot be expanded - decScratchByteArrayLen = cacheLineSize // + (8 * 2) // - (8 * 1) -) - -var ( - errstrOnlyMapOrArrayCanDecodeIntoStruct = "only encoded map or array can be decoded into a struct" - errstrCannotDecodeIntoNil = "cannot decode into nil" - - errmsgExpandSliceOverflow = "expand slice: slice overflow" - errmsgExpandSliceCannotChange = "expand slice: cannot change" - - errDecoderNotInitialized = errors.New("Decoder not initialized") - - errDecUnreadByteNothingToRead = errors.New("cannot unread - nothing has been read") - errDecUnreadByteLastByteNotRead = errors.New("cannot unread - last byte has not been read") - errDecUnreadByteUnknown = errors.New("cannot unread - reason unknown") - errMaxDepthExceeded = errors.New("maximum decoding depth exceeded") -) - -/* - // decReader abstracts the reading source, allowing implementations that can // read from an io.Reader or directly off a byte slice with zero-copying. 
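That split — one reader implementation backed by an io.Reader, one reading straight off a []byte without copying — is what the package's two public constructors correspond to. A hedged usage sketch; the handle choice and function names are illustrative, not part of the package:

import (
	"io"

	"github.com/hashicorp/go-msgpack/codec"
)

var mh codec.MsgpackHandle

// Decodes from any io.Reader (the io.Reader-backed decReader).
func decodeFromReader(r io.Reader, out interface{}) error {
	return codec.NewDecoder(r, &mh).Decode(out)
}

// Decodes directly off the byte slice without copying (bytesDecReader).
func decodeFromBytes(in []byte, out interface{}) error {
	return codec.NewDecoderBytes(in, &mh).Decode(out)
}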
-// -// Deprecated: Use decReaderSwitch instead. type decReader interface { - unreadn1() - // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR - // just return a view of the []byte being decoded from. - // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control. - readx(n int) []byte + readn(n int) []byte readb([]byte) readn1() uint8 - numread() uint // number of bytes read - track() - stopTrack() []byte - - // skip will skip any byte that matches, and return the first non-matching byte - skip(accept *bitset256) (token byte) - // readTo will read any byte that matches, stopping once no-longer matching. - readTo(in []byte, accept *bitset256) (out []byte) - // readUntil will read, only stopping once it matches the 'stop' byte. - readUntil(in []byte, stop byte) (out []byte) + readUint16() uint16 + readUint32() uint32 + readUint64() uint64 } -*/ - type decDriver interface { - // this will check if the next token is a break. - CheckBreak() bool - // TryDecodeAsNil tries to decode as nil. - // Note: TryDecodeAsNil should be careful not to share any temporary []byte with - // the rest of the decDriver. This is because sometimes, we optimize by holding onto - // a transient []byte, and ensuring the only other call we make to the decDriver - // during that time is maybe a TryDecodeAsNil() call. - TryDecodeAsNil() bool - // ContainerType returns one of: Bytes, String, Nil, Slice or Map. Return unSet if not known. - ContainerType() (vt valueType) - // IsBuiltinType(rt uintptr) bool - - // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt. - // For maps and arrays, it will not do the decoding in-band, but will signal - // the decoder, so that is done later, by setting the decNaked.valueType field. - // - // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). - // for extensions, DecodeNaked must read the tag and the []byte if it exists. - // if the []byte is not read, then kInterfaceNaked will treat it as a Handle - // that stores the subsequent value in-band, and complete reading the RawExt. - // - // extensions should also use readx to decode them, for efficiency. - // kInterface will extract the detached byte slice if it has to pass it outside its realm. - DecodeNaked() - - // Deprecated: use DecodeInt64 and DecodeUint64 instead - // DecodeInt(bitsize uint8) (i int64) - // DecodeUint(bitsize uint8) (ui uint64) - - DecodeInt64() (i int64) - DecodeUint64() (ui uint64) - - DecodeFloat64() (f float64) - DecodeBool() (b bool) - // DecodeString can also decode symbols. - // It looks redundant as DecodeBytes is available. - // However, some codecs (e.g. binc) support symbols and can - // return a pre-stored string value, meaning that it can bypass - // the cost of []byte->string conversion. - DecodeString() (s string) - DecodeStringAsBytes() (v []byte) - - // DecodeBytes may be called directly, without going through reflection. - // Consequently, it must be designed to handle possible nil. - DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) - // DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) - - // decodeExt will decode into a *RawExt or into an extension. 
- DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64) - // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) - - DecodeTime() (t time.Time) - - ReadArrayStart() int - ReadArrayElem() - ReadArrayEnd() - ReadMapStart() int - ReadMapElemKey() - ReadMapElemValue() - ReadMapEnd() - - reset() - uncacheRead() -} - -type decodeError struct { - codecError - pos int -} - -func (d decodeError) Error() string { - return fmt.Sprintf("%s decode error [pos %d]: %v", d.name, d.pos, d.err) + initReadNext() + tryDecodeAsNil() bool + currentEncodedType() valueType + isBuiltinType(rt uintptr) bool + decodeBuiltin(rt uintptr, v interface{}) + //decodeNaked: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). + decodeNaked() (v interface{}, vt valueType, decodeFurther bool) + decodeInt(bitsize uint8) (i int64) + decodeUint(bitsize uint8) (ui uint64) + decodeFloat(chkOverflow32 bool) (f float64) + decodeBool() (b bool) + // decodeString can also decode symbols + decodeString() (s string) + decodeBytes(bs []byte) (bsOut []byte, changed bool) + decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) + readMapLen() int + readArrayLen() int } -type decDriverNoopContainerReader struct{} - -func (x decDriverNoopContainerReader) ReadArrayStart() (v int) { return } -func (x decDriverNoopContainerReader) ReadArrayElem() {} -func (x decDriverNoopContainerReader) ReadArrayEnd() {} -func (x decDriverNoopContainerReader) ReadMapStart() (v int) { return } -func (x decDriverNoopContainerReader) ReadMapElemKey() {} -func (x decDriverNoopContainerReader) ReadMapElemValue() {} -func (x decDriverNoopContainerReader) ReadMapEnd() {} -func (x decDriverNoopContainerReader) CheckBreak() (v bool) { return } - -// func (x decNoSeparator) uncacheRead() {} - -// DecodeOptions captures configuration options during decode. type DecodeOptions struct { - // MapType specifies type to use during schema-less decoding of a map in the stream. - // If nil (unset), we default to map[string]interface{} iff json handle and MapStringAsKey=true, - // else map[interface{}]interface{}. + // An instance of MapType is used during schema-less decoding of a map in the stream. + // If nil, we use map[interface{}]interface{} MapType reflect.Type - - // SliceType specifies type to use during schema-less decoding of an array in the stream. - // If nil (unset), we default to []interface{} for all formats. + // An instance of SliceType is used during schema-less decoding of an array in the stream. + // If nil, we use []interface{} SliceType reflect.Type - - // MaxInitLen defines the maxinum initial length that we "make" a collection - // (string, slice, map, chan). If 0 or negative, we default to a sensible value - // based on the size of an element in the collection. - // - // For example, when decoding, a stream may say that it has 2^64 elements. - // We should not auto-matically provision a slice of that size, to prevent Out-Of-Memory crash. - // Instead, we provision up to MaxInitLen, fill that up, and start appending after that. - MaxInitLen int - - // ReaderBufferSize is the size of the buffer used when reading. - // - // if > 0, we use a smart buffer internally for performance purposes. - ReaderBufferSize int - - // MaxDepth defines the maximum depth when decoding nested - // maps and slices. If 0 or negative, we default to a suitably large number (currently 1024). 
- MaxDepth int16 - - // If ErrorIfNoField, return an error when decoding a map + // ErrorIfNoField controls whether an error is returned when decoding a map // from a codec stream into a struct, and no matching struct field is found. ErrorIfNoField bool - - // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded. - // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array, - // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set). - ErrorIfNoArrayExpand bool - - // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64). - SignedInteger bool - - // MapValueReset controls how we decode into a map value. - // - // By default, we MAY retrieve the mapping for a key, and then decode into that. - // However, especially with big maps, that retrieval may be expensive and unnecessary - // if the stream already contains all that is necessary to recreate the value. - // - // If true, we will never retrieve the previous mapping, - // but rather decode into a new value and set that in the map. - // - // If false, we will retrieve the previous mapping if necessary e.g. - // the previous mapping is a pointer, or is a struct or array with pre-set state, - // or is an interface. - MapValueReset bool - - // SliceElementReset: on decoding a slice, reset the element to a zero value first. - // - // concern: if the slice already contained some garbage, we will decode into that garbage. - SliceElementReset bool - - // InterfaceReset controls how we decode into an interface. - // - // By default, when we see a field that is an interface{...}, - // or a map with interface{...} value, we will attempt decoding into the - // "contained" value. - // - // However, this prevents us from reading a string into an interface{} - // that formerly contained a number. - // - // If true, we will decode into a new "blank" value, and set that in the interface. - // If false, we will decode into whatever is contained in the interface. - InterfaceReset bool - - // InternString controls interning of strings during decoding. - // - // Some handles, e.g. json, typically will read map keys as strings. - // If the set of keys are finite, it may help reduce allocation to - // look them up from a map (than to allocate them afresh). - // - // Note: Handles will be smart when using the intern functionality. - // Every string should not be interned. - // An excellent use-case for interning is struct field names, - // or map keys where key type is string. - InternString bool - - // PreferArrayOverSlice controls whether to decode to an array or a slice. - // - // This only impacts decoding into a nil interface{}. - // Consequently, it has no effect on codecgen. - // - // *Note*: This only applies if using go1.5 and above, - // as it requires reflect.ArrayOf support which was absent before go1.5. - PreferArrayOverSlice bool - - // DeleteOnNilMapValue controls how to decode a nil value in the stream. - // - // If true, we will delete the mapping of the key. - // Else, just set the mapping to the zero value of the type. - DeleteOnNilMapValue bool - - // RawToString controls how raw bytes in a stream are decoded into a nil interface{}. - // By default, they are decoded as []byte, but can be decoded as string (if configured). 
- RawToString bool -} - -// ------------------------------------------------ - -type unreadByteStatus uint8 - -// unreadByteStatus goes from -// undefined (when initialized) -- (read) --> canUnread -- (unread) --> canRead ... -const ( - unreadByteUndefined unreadByteStatus = iota - unreadByteCanRead - unreadByteCanUnread -) - -type ioDecReaderCommon struct { - r io.Reader // the reader passed in - - n uint // num read - - l byte // last byte - ls unreadByteStatus // last byte status - trb bool // tracking bytes turned on - _ bool - b [4]byte // tiny buffer for reading single bytes - - tr []byte // tracking bytes read } -func (z *ioDecReaderCommon) reset(r io.Reader) { - z.r = r - z.ls = unreadByteUndefined - z.l, z.n = 0, 0 - z.trb = false - if z.tr != nil { - z.tr = z.tr[:0] - } -} - -func (z *ioDecReaderCommon) numread() uint { - return z.n -} - -func (z *ioDecReaderCommon) track() { - if z.tr != nil { - z.tr = z.tr[:0] - } - z.trb = true -} - -func (z *ioDecReaderCommon) stopTrack() (bs []byte) { - z.trb = false - return z.tr -} - -// ------------------------------------------ +// ------------------------------------ -// ioDecReader is a decReader that reads off an io.Reader. -// -// It also has a fallback implementation of ByteScanner if needed. +// ioDecReader is a decReader that reads off an io.Reader type ioDecReader struct { - ioDecReaderCommon - - rr io.Reader - br io.ByteScanner - - x [scratchByteArrayLen]byte // for: get struct field name, swallow valueTypeBytes, etc - _ [1]uint64 // padding -} - -func (z *ioDecReader) reset(r io.Reader) { - z.ioDecReaderCommon.reset(r) - - var ok bool - z.rr = r - z.br, ok = r.(io.ByteScanner) - if !ok { - z.br = z - z.rr = z - } -} - -func (z *ioDecReader) Read(p []byte) (n int, err error) { - if len(p) == 0 { - return - } - var firstByte bool - if z.ls == unreadByteCanRead { - z.ls = unreadByteCanUnread - p[0] = z.l - if len(p) == 1 { - n = 1 - return - } - firstByte = true - p = p[1:] - } - n, err = z.r.Read(p) - if n > 0 { - if err == io.EOF && n == len(p) { - err = nil // read was successful, so postpone EOF (till next time) - } - z.l = p[n-1] - z.ls = unreadByteCanUnread - } - if firstByte { - n++ - } - return -} - -func (z *ioDecReader) ReadByte() (c byte, err error) { - n, err := z.Read(z.b[:1]) - if n == 1 { - c = z.b[0] - if err == io.EOF { - err = nil // read was successful, so postpone EOF (till next time) - } - } - return -} - -func (z *ioDecReader) UnreadByte() (err error) { - switch z.ls { - case unreadByteCanUnread: - z.ls = unreadByteCanRead - case unreadByteCanRead: - err = errDecUnreadByteLastByteNotRead - case unreadByteUndefined: - err = errDecUnreadByteNothingToRead - default: - err = errDecUnreadByteUnknown - } - return + r io.Reader + br io.ByteReader + x [8]byte //temp byte array re-used internally for efficiency } -func (z *ioDecReader) readx(n uint) (bs []byte) { - if n == 0 { +func (z *ioDecReader) readn(n int) (bs []byte) { + if n <= 0 { return } - if n < uint(len(z.x)) { - bs = z.x[:n] - } else { - bs = make([]byte, n) - } - if _, err := decReadFull(z.rr, bs); err != nil { + bs = make([]byte, n) + if _, err := io.ReadAtLeast(z.r, bs, n); err != nil { panic(err) } - z.n += uint(len(bs)) - if z.trb { - z.tr = append(z.tr, bs...) - } return } func (z *ioDecReader) readb(bs []byte) { - if len(bs) == 0 { - return - } - if _, err := decReadFull(z.rr, bs); err != nil { + if _, err := io.ReadAtLeast(z.r, bs, len(bs)); err != nil { panic(err) } - z.n += uint(len(bs)) - if z.trb { - z.tr = append(z.tr, bs...) 
- } } -func (z *ioDecReader) readn1eof() (b uint8, eof bool) { - b, err := z.br.ReadByte() - if err == nil { - z.n++ - if z.trb { - z.tr = append(z.tr, b) +func (z *ioDecReader) readn1() uint8 { + if z.br != nil { + b, err := z.br.ReadByte() + if err != nil { + panic(err) } - } else if err == io.EOF { - eof = true - } else { - panic(err) + return b } - return + z.readb(z.x[:1]) + return z.x[0] } -func (z *ioDecReader) readn1() (b uint8) { - b, err := z.br.ReadByte() - if err == nil { - z.n++ - if z.trb { - z.tr = append(z.tr, b) - } - return - } - panic(err) +func (z *ioDecReader) readUint16() uint16 { + z.readb(z.x[:2]) + return bigen.Uint16(z.x[:2]) } -func (z *ioDecReader) skip(accept *bitset256) (token byte) { - var eof bool - // for { - // token, eof = z.readn1eof() - // if eof { - // return - // } - // if accept.isset(token) { - // continue - // } - // return - // } -LOOP: - token, eof = z.readn1eof() - if eof { - return - } - if accept.isset(token) { - goto LOOP - } - return +func (z *ioDecReader) readUint32() uint32 { + z.readb(z.x[:4]) + return bigen.Uint32(z.x[:4]) } -func (z *ioDecReader) readTo(in []byte, accept *bitset256) []byte { - // out = in - - // for { - // token, eof := z.readn1eof() - // if eof { - // return - // } - // if accept.isset(token) { - // out = append(out, token) - // } else { - // z.unreadn1() - // return - // } - // } -LOOP: - token, eof := z.readn1eof() - if eof { - return in - } - if accept.isset(token) { - // out = append(out, token) - in = append(in, token) - goto LOOP - } - z.unreadn1() - return in -} - -func (z *ioDecReader) readUntil(in []byte, stop byte) (out []byte) { - out = in - // for { - // token, eof := z.readn1eof() - // if eof { - // panic(io.EOF) - // } - // out = append(out, token) - // if token == stop { - // return - // } - // } -LOOP: - token, eof := z.readn1eof() - if eof { - panic(io.EOF) - } - out = append(out, token) - if token == stop { - return - } - goto LOOP -} - -//go:noinline -func (z *ioDecReader) unreadn1() { - err := z.br.UnreadByte() - if err != nil { - panic(err) - } - z.n-- - if z.trb { - if l := len(z.tr) - 1; l >= 0 { - z.tr = z.tr[:l] - } - } +func (z *ioDecReader) readUint64() uint64 { + z.readb(z.x[:8]) + return bigen.Uint64(z.x[:8]) } // ------------------------------------ -type bufioDecReader struct { - ioDecReaderCommon - - c uint // cursor - buf []byte - - bytesBufPooler - - // err error - - // Extensions can call Decode() within a current Decode() call. - // We need to know when the top level Decode() call returns, - // so we can decide whether to Release() or not. - calls uint16 // what depth in mustDecode are we in now. - - _ [6]uint8 // padding - - _ [1]uint64 // padding +// bytesDecReader is a decReader that reads off a byte slice with zero copying +type bytesDecReader struct { + b []byte // data + c int // cursor + a int // available } -func (z *bufioDecReader) reset(r io.Reader, bufsize int) { - z.ioDecReaderCommon.reset(r) - z.c = 0 - z.calls = 0 - if cap(z.buf) >= bufsize { - z.buf = z.buf[:0] - } else { - z.buf = z.bytesBufPooler.get(bufsize)[:0] - // z.buf = make([]byte, 0, bufsize) +func (z *bytesDecReader) consume(n int) (oldcursor int) { + if z.a == 0 { + panic(io.EOF) } -} - -func (z *bufioDecReader) release() { - z.buf = nil - z.bytesBufPooler.end() -} - -func (z *bufioDecReader) readb(p []byte) { - var n = uint(copy(p, z.buf[z.c:])) - z.n += n - z.c += n - if len(p) == int(n) { - if z.trb { - z.tr = append(z.tr, p...) 
// cost=9 - } - } else { - z.readbFill(p, n) + if n > z.a { + decErr("Trying to read %v bytes. Only %v available", n, z.a) } + // z.checkAvailable(n) + oldcursor = z.c + z.c = oldcursor + n + z.a = z.a - n + return } -//go:noinline - fallback when z.buf is consumed -func (z *bufioDecReader) readbFill(p0 []byte, n uint) { - // at this point, there's nothing in z.buf to read (z.buf is fully consumed) - p := p0[n:] - var n2 uint - var err error - if len(p) > cap(z.buf) { - n2, err = decReadFull(z.r, p) - if err != nil { - panic(err) - } - n += n2 - z.n += n2 - // always keep last byte in z.buf - z.buf = z.buf[:1] - z.buf[0] = p[len(p)-1] - z.c = 1 - if z.trb { - z.tr = append(z.tr, p0[:n]...) - } +func (z *bytesDecReader) readn(n int) (bs []byte) { + if n <= 0 { return } - // z.c is now 0, and len(p) <= cap(z.buf) -LOOP: - // for len(p) > 0 && z.err == nil { - if len(p) > 0 { - z.buf = z.buf[0:cap(z.buf)] - var n1 int - n1, err = z.r.Read(z.buf) - n2 = uint(n1) - if n2 == 0 && err != nil { - panic(err) - } - z.buf = z.buf[:n2] - n2 = uint(copy(p, z.buf)) - z.c = n2 - n += n2 - z.n += n2 - p = p[n2:] - goto LOOP - } - if z.c == 0 { - z.buf = z.buf[:1] - z.buf[0] = p[len(p)-1] - z.c = 1 - } - if z.trb { - z.tr = append(z.tr, p0[:n]...) - } -} - -func (z *bufioDecReader) readn1() (b byte) { - // fast-path, so we elide calling into Read() most of the time - if z.c < uint(len(z.buf)) { - b = z.buf[z.c] - z.c++ - z.n++ - if z.trb { - z.tr = append(z.tr, b) - } - } else { // meaning z.c == len(z.buf) or greater ... so need to fill - z.readbFill(z.b[:1], 0) - b = z.b[0] - } + c0 := z.consume(n) + bs = z.b[c0:z.c] return } -func (z *bufioDecReader) unreadn1() { - if z.c == 0 { - panic(errDecUnreadByteNothingToRead) - } - z.c-- - z.n-- - if z.trb { - z.tr = z.tr[:len(z.tr)-1] - } +func (z *bytesDecReader) readb(bs []byte) { + copy(bs, z.readn(len(bs))) } -func (z *bufioDecReader) readx(n uint) (bs []byte) { - if n == 0 { - // return - } else if z.c+n <= uint(len(z.buf)) { - bs = z.buf[z.c : z.c+n] - z.n += n - z.c += n - if z.trb { - z.tr = append(z.tr, bs...) - } - } else { - bs = make([]byte, n) - // n no longer used - can reuse - n = uint(copy(bs, z.buf[z.c:])) - z.n += n - z.c += n - z.readbFill(bs, n) - } - return +func (z *bytesDecReader) readn1() uint8 { + c0 := z.consume(1) + return z.b[c0] } -//go:noinline - track called by Decoder.nextValueBytes() (called by jsonUnmarshal,rawBytes) -func (z *bufioDecReader) doTrack(y uint) { - z.tr = append(z.tr, z.buf[z.c:y]...) // cost=14??? -} +// Use binaryEncoding helper for 4 and 8 bits, but inline it for 2 bits +// creating temp slice variable and copying it to helper function is expensive +// for just 2 bits. -func (z *bufioDecReader) skipLoopFn(i uint) { - z.n += (i - z.c) - 1 - i++ - if z.trb { - // z.tr = append(z.tr, z.buf[z.c:i]...) 
- z.doTrack(i) - } - z.c = i +func (z *bytesDecReader) readUint16() uint16 { + c0 := z.consume(2) + return uint16(z.b[c0+1]) | uint16(z.b[c0])<<8 } -func (z *bufioDecReader) skip(accept *bitset256) (token byte) { - // token, _ = z.search(nil, accept, 0, 1); return - - // for i := z.c; i < len(z.buf); i++ { - // if token = z.buf[i]; !accept.isset(token) { - // z.skipLoopFn(i) - // return - // } - // } - - i := z.c -LOOP: - if i < uint(len(z.buf)) { - // inline z.skipLoopFn(i) and refactor, so cost is within inline budget - token = z.buf[i] - i++ - if accept.isset(token) { - goto LOOP - } - z.n += i - 2 - z.c - if z.trb { - z.doTrack(i) - } - z.c = i - return - } - return z.skipFill(accept) +func (z *bytesDecReader) readUint32() uint32 { + c0 := z.consume(4) + return bigen.Uint32(z.b[c0:z.c]) } -func (z *bufioDecReader) skipFill(accept *bitset256) (token byte) { - z.n += uint(len(z.buf)) - z.c - if z.trb { - z.tr = append(z.tr, z.buf[z.c:]...) - } - var n2 int - var err error - for { - z.c = 0 - z.buf = z.buf[0:cap(z.buf)] - n2, err = z.r.Read(z.buf) - if n2 == 0 && err != nil { - panic(err) - } - z.buf = z.buf[:n2] - var i int - for i, token = range z.buf { - if !accept.isset(token) { - z.skipLoopFn(uint(i)) - return - } - } - // for i := 0; i < n2; i++ { - // if token = z.buf[i]; !accept.isset(token) { - // z.skipLoopFn(i) - // return - // } - // } - z.n += uint(n2) - if z.trb { - z.tr = append(z.tr, z.buf...) - } - } +func (z *bytesDecReader) readUint64() uint64 { + c0 := z.consume(8) + return bigen.Uint64(z.b[c0:z.c]) } -func (z *bufioDecReader) readToLoopFn(i uint, out0 []byte) (out []byte) { - // out0 is never nil - z.n += (i - z.c) - 1 - out = append(out0, z.buf[z.c:i]...) - if z.trb { - z.doTrack(i) - } - z.c = i - return -} +// ------------------------------------ -func (z *bufioDecReader) readTo(in []byte, accept *bitset256) (out []byte) { - // _, out = z.search(in, accept, 0, 2); return - - // for i := z.c; i < len(z.buf); i++ { - // if !accept.isset(z.buf[i]) { - // return z.readToLoopFn(i, nil) - // } - // } - - i := z.c -LOOP: - if i < uint(len(z.buf)) { - if !accept.isset(z.buf[i]) { - // return z.readToLoopFn(i, nil) - // inline readToLoopFn here (for performance) - z.n += (i - z.c) - 1 - out = z.buf[z.c:i] - if z.trb { - z.doTrack(i) - } - z.c = i - return - } - i++ - goto LOOP - } - return z.readToFill(in, accept) +// decFnInfo has methods for registering handling decoding of a specific type +// based on some characteristics (builtin, extension, reflect Kind, etc) +type decFnInfo struct { + ti *typeInfo + d *Decoder + dd decDriver + xfFn func(reflect.Value, []byte) error + xfTag byte + array bool } -func (z *bufioDecReader) readToFill(in []byte, accept *bitset256) (out []byte) { - z.n += uint(len(z.buf)) - z.c - out = append(in, z.buf[z.c:]...) - if z.trb { - z.tr = append(z.tr, z.buf[z.c:]...) - } - var n2 int - var err error - for { - z.c = 0 - z.buf = z.buf[0:cap(z.buf)] - n2, err = z.r.Read(z.buf) - if n2 == 0 && err != nil { - if err == io.EOF { - return // readTo should read until it matches or end is reached - } - panic(err) - } - z.buf = z.buf[:n2] - for i, token := range z.buf { - if !accept.isset(token) { - return z.readToLoopFn(uint(i), out) - } - } - // for i := 0; i < n2; i++ { - // if !accept.isset(z.buf[i]) { - // return z.readToLoopFn(i, out) - // } - // } - out = append(out, z.buf...) - z.n += uint(n2) - if z.trb { - z.tr = append(z.tr, z.buf...) 
- } - } +func (f *decFnInfo) builtin(rv reflect.Value) { + f.dd.decodeBuiltin(f.ti.rtid, rv.Addr().Interface()) } -func (z *bufioDecReader) readUntilLoopFn(i uint, out0 []byte) (out []byte) { - z.n += (i - z.c) - 1 - i++ - out = append(out0, z.buf[z.c:i]...) - if z.trb { - // z.tr = append(z.tr, z.buf[z.c:i]...) - z.doTrack(i) - } - z.c = i - return +func (f *decFnInfo) rawExt(rv reflect.Value) { + xtag, xbs := f.dd.decodeExt(false, 0) + rv.Field(0).SetUint(uint64(xtag)) + rv.Field(1).SetBytes(xbs) } -func (z *bufioDecReader) readUntil(in []byte, stop byte) (out []byte) { - // _, out = z.search(in, nil, stop, 4); return - - // for i := z.c; i < len(z.buf); i++ { - // if z.buf[i] == stop { - // return z.readUntilLoopFn(i, nil) - // } - // } - - i := z.c -LOOP: - if i < uint(len(z.buf)) { - if z.buf[i] == stop { - // inline readUntilLoopFn - // return z.readUntilLoopFn(i, nil) - z.n += (i - z.c) - 1 - i++ - out = z.buf[z.c:i] - if z.trb { - z.doTrack(i) - } - z.c = i - return - } - i++ - goto LOOP +func (f *decFnInfo) ext(rv reflect.Value) { + _, xbs := f.dd.decodeExt(true, f.xfTag) + if fnerr := f.xfFn(rv, xbs); fnerr != nil { + panic(fnerr) } - return z.readUntilFill(in, stop) } -func (z *bufioDecReader) readUntilFill(in []byte, stop byte) (out []byte) { - z.n += uint(len(z.buf)) - z.c - out = append(in, z.buf[z.c:]...) - if z.trb { - z.tr = append(z.tr, z.buf[z.c:]...) - } - var n1 int - var n2 uint - var err error - for { - z.c = 0 - z.buf = z.buf[0:cap(z.buf)] - n1, err = z.r.Read(z.buf) - n2 = uint(n1) - if n2 == 0 && err != nil { - panic(err) - } - z.buf = z.buf[:n2] - for i, token := range z.buf { - if token == stop { - return z.readUntilLoopFn(uint(i), out) +func (f *decFnInfo) binaryMarshal(rv reflect.Value) { + var bm binaryUnmarshaler + if f.ti.unmIndir == -1 { + bm = rv.Addr().Interface().(binaryUnmarshaler) + } else if f.ti.unmIndir == 0 { + bm = rv.Interface().(binaryUnmarshaler) + } else { + for j, k := int8(0), f.ti.unmIndir; j < k; j++ { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) } + rv = rv.Elem() } - // for i := 0; i < n2; i++ { - // if z.buf[i] == stop { - // return z.readUntilLoopFn(i, out) - // } - // } - out = append(out, z.buf...) - z.n += n2 - if z.trb { - z.tr = append(z.tr, z.buf...) - } + bm = rv.Interface().(binaryUnmarshaler) } -} - -// ------------------------------------ - -var errBytesDecReaderCannotUnread = errors.New("cannot unread last byte read") - -// bytesDecReader is a decReader that reads off a byte slice with zero copying -type bytesDecReader struct { - b []byte // data - c uint // cursor - t uint // track start - // a int // available -} - -func (z *bytesDecReader) reset(in []byte) { - z.b = in - // z.a = len(in) - z.c = 0 - z.t = 0 -} - -func (z *bytesDecReader) numread() uint { - return z.c -} - -func (z *bytesDecReader) unreadn1() { - if z.c == 0 || len(z.b) == 0 { - panic(errBytesDecReaderCannotUnread) - } - z.c-- - // z.a++ -} - -func (z *bytesDecReader) readx(n uint) (bs []byte) { - // slicing from a non-constant start position is more expensive, - // as more computation is required to decipher the pointer start position. - // However, we do it only once, and it's better than reslicing both z.b and return value. 
- - // if n <= 0 { - // } else if z.a == 0 { - // panic(io.EOF) - // } else if n > z.a { - // panic(io.ErrUnexpectedEOF) - // } else { - // c0 := z.c - // z.c = c0 + n - // z.a = z.a - n - // bs = z.b[c0:z.c] - // } - // return - - if n != 0 { - z.c += n - if z.c > uint(len(z.b)) { - z.c = uint(len(z.b)) - panic(io.EOF) - } - bs = z.b[z.c-n : z.c] + xbs, _ := f.dd.decodeBytes(nil) + if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { + panic(fnerr) } - return - - // if n == 0 { - // } else if z.c+n > uint(len(z.b)) { - // z.c = uint(len(z.b)) - // panic(io.EOF) - // } else { - // z.c += n - // bs = z.b[z.c-n : z.c] - // } - // return - - // if n == 0 { - // return - // } - // if z.c == uint(len(z.b)) { - // panic(io.EOF) - // } - // if z.c+n > uint(len(z.b)) { - // panic(io.ErrUnexpectedEOF) - // } - // // z.a -= n - // z.c += n - // return z.b[z.c-n : z.c] } -func (z *bytesDecReader) readb(bs []byte) { - copy(bs, z.readx(uint(len(bs)))) +func (f *decFnInfo) kErr(rv reflect.Value) { + decErr("Unhandled value for kind: %v: %s", rv.Kind(), msgBadDesc) } -func (z *bytesDecReader) readn1() (v uint8) { - if z.c == uint(len(z.b)) { - panic(io.EOF) - } - v = z.b[z.c] - z.c++ - // z.a-- - return -} - -// func (z *bytesDecReader) readn1eof() (v uint8, eof bool) { -// if z.a == 0 { -// eof = true -// return -// } -// v = z.b[z.c] -// z.c++ -// z.a-- -// return -// } - -func (z *bytesDecReader) skip(accept *bitset256) (token byte) { - i := z.c - // if i == len(z.b) { - // goto END - // // panic(io.EOF) - // } - - // Replace loop with goto construct, so that this can be inlined - // for i := z.c; i < blen; i++ { - // if !accept.isset(z.b[i]) { - // token = z.b[i] - // i++ - // z.a -= (i - z.c) - // z.c = i - // return - // } - // } - - // i := z.c -LOOP: - if i < uint(len(z.b)) { - token = z.b[i] - i++ - if accept.isset(token) { - goto LOOP - } - // z.a -= (i - z.c) - z.c = i - return - } - // END: - panic(io.EOF) - // // z.a = 0 - // z.c = blen - // return +func (f *decFnInfo) kString(rv reflect.Value) { + rv.SetString(f.dd.decodeString()) } -func (z *bytesDecReader) readTo(_ []byte, accept *bitset256) (out []byte) { - return z.readToNoInput(accept) +func (f *decFnInfo) kBool(rv reflect.Value) { + rv.SetBool(f.dd.decodeBool()) } -func (z *bytesDecReader) readToNoInput(accept *bitset256) (out []byte) { - i := z.c - if i == uint(len(z.b)) { - panic(io.EOF) - } - - // Replace loop with goto construct, so that this can be inlined - // for i := z.c; i < blen; i++ { - // if !accept.isset(z.b[i]) { - // out = z.b[z.c:i] - // z.a -= (i - z.c) - // z.c = i - // return - // } - // } - // out = z.b[z.c:] - // z.a, z.c = 0, blen - // return - - // i := z.c - // LOOP: - // if i < blen { - // if accept.isset(z.b[i]) { - // i++ - // goto LOOP - // } - // out = z.b[z.c:i] - // z.a -= (i - z.c) - // z.c = i - // return - // } - // out = z.b[z.c:] - // // z.a, z.c = 0, blen - // z.a = 0 - // z.c = blen - // return - - // c := i -LOOP: - if i < uint(len(z.b)) { - if accept.isset(z.b[i]) { - i++ - goto LOOP - } - } - - out = z.b[z.c:i] - // z.a -= (i - z.c) - z.c = i - return // z.b[c:i] - // z.c, i = i, z.c - // return z.b[i:z.c] +func (f *decFnInfo) kInt(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(intBitsize)) } -func (z *bytesDecReader) readUntil(_ []byte, stop byte) (out []byte) { - return z.readUntilNoInput(stop) +func (f *decFnInfo) kInt64(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(64)) } -func (z *bytesDecReader) readUntilNoInput(stop byte) (out []byte) { - i := z.c - // if i == len(z.b) { - // 
panic(io.EOF) - // } - - // Replace loop with goto construct, so that this can be inlined - // for i := z.c; i < blen; i++ { - // if z.b[i] == stop { - // i++ - // out = z.b[z.c:i] - // z.a -= (i - z.c) - // z.c = i - // return - // } - // } -LOOP: - if i < uint(len(z.b)) { - if z.b[i] == stop { - i++ - out = z.b[z.c:i] - // z.a -= (i - z.c) - z.c = i - return - } - i++ - goto LOOP - } - // z.a = 0 - // z.c = blen - panic(io.EOF) +func (f *decFnInfo) kInt32(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(32)) } -func (z *bytesDecReader) track() { - z.t = z.c +func (f *decFnInfo) kInt8(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(8)) } -func (z *bytesDecReader) stopTrack() (bs []byte) { - return z.b[z.t:z.c] +func (f *decFnInfo) kInt16(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(16)) } -// ---------------------------------------- - -// func (d *Decoder) builtin(f *codecFnInfo, rv reflect.Value) { -// d.d.DecodeBuiltin(f.ti.rtid, rv2i(rv)) -// } - -func (d *Decoder) rawExt(f *codecFnInfo, rv reflect.Value) { - d.d.DecodeExt(rv2i(rv), 0, nil) +func (f *decFnInfo) kFloat32(rv reflect.Value) { + rv.SetFloat(f.dd.decodeFloat(true)) } -func (d *Decoder) ext(f *codecFnInfo, rv reflect.Value) { - d.d.DecodeExt(rv2i(rv), f.xfTag, f.xfFn) +func (f *decFnInfo) kFloat64(rv reflect.Value) { + rv.SetFloat(f.dd.decodeFloat(false)) } -func (d *Decoder) selferUnmarshal(f *codecFnInfo, rv reflect.Value) { - rv2i(rv).(Selfer).CodecDecodeSelf(d) +func (f *decFnInfo) kUint8(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(8)) } -func (d *Decoder) binaryUnmarshal(f *codecFnInfo, rv reflect.Value) { - bm := rv2i(rv).(encoding.BinaryUnmarshaler) - xbs := d.d.DecodeBytes(nil, true) - if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { - panic(fnerr) - } +func (f *decFnInfo) kUint64(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(64)) } -func (d *Decoder) textUnmarshal(f *codecFnInfo, rv reflect.Value) { - tm := rv2i(rv).(encoding.TextUnmarshaler) - fnerr := tm.UnmarshalText(d.d.DecodeStringAsBytes()) - if fnerr != nil { - panic(fnerr) - } +func (f *decFnInfo) kUint(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(uintBitsize)) } -func (d *Decoder) jsonUnmarshal(f *codecFnInfo, rv reflect.Value) { - tm := rv2i(rv).(jsonUnmarshaler) - // bs := d.d.DecodeBytes(d.b[:], true, true) - // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. - fnerr := tm.UnmarshalJSON(d.nextValueBytes()) - if fnerr != nil { - panic(fnerr) - } +func (f *decFnInfo) kUint32(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(32)) } -func (d *Decoder) kErr(f *codecFnInfo, rv reflect.Value) { - d.errorf("no decoding function defined for kind %v", rv.Kind()) +func (f *decFnInfo) kUint16(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(16)) } -// var kIntfCtr uint64 +// func (f *decFnInfo) kPtr(rv reflect.Value) { +// debugf(">>>>>>> ??? decode kPtr called - shouldn't get called") +// if rv.IsNil() { +// rv.Set(reflect.New(rv.Type().Elem())) +// } +// f.d.decodeValue(rv.Elem()) +// } -func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) { - // nil interface: - // use some hieristics to decode it appropriately - // based on the detected next value in the stream. - n := d.naked() - d.d.DecodeNaked() - if n.v == valueTypeNil { +func (f *decFnInfo) kInterface(rv reflect.Value) { + // debugf("\t===> kInterface") + if !rv.IsNil() { + f.d.decodeValue(rv.Elem()) return } - // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader). 
- if f.ti.numMeth > 0 { - d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + // nil interface: + // use some hieristics to set the nil interface to an + // appropriate value based on the first byte read (byte descriptor bd) + v, vt, decodeFurther := f.dd.decodeNaked() + if vt == valueTypeNil { return } - // var useRvn bool - switch n.v { + // Cannot decode into nil interface with methods (e.g. error, io.Reader, etc) + // if non-nil value in stream. + if num := f.ti.rt.NumMethod(); num > 0 { + decErr("decodeValue: Cannot decode non-nil codec value into nil %v (%v methods)", + f.ti.rt, num) + } + var rvn reflect.Value + var useRvn bool + switch vt { case valueTypeMap: - // if json, default to a map type with string keys - mtid := d.mtid - if mtid == 0 { - if d.jsms { - mtid = mapStrIntfTypId - } else { - mtid = mapIntfIntfTypId - } - } - if mtid == mapIntfIntfTypId { - var v2 map[interface{}]interface{} - d.decode(&v2) - rvn = reflect.ValueOf(&v2).Elem() - } else if mtid == mapStrIntfTypId { // for json performance - var v2 map[string]interface{} - d.decode(&v2) - rvn = reflect.ValueOf(&v2).Elem() + if f.d.h.MapType == nil { + var m2 map[interface{}]interface{} + v = &m2 } else { - if d.mtr { - rvn = reflect.New(d.h.MapType) - d.decode(rv2i(rvn)) - rvn = rvn.Elem() - } else { - rvn = reflect.New(d.h.MapType).Elem() - d.decodeValue(rvn, nil, true) - } + rvn = reflect.New(f.d.h.MapType).Elem() + useRvn = true } case valueTypeArray: - if d.stid == 0 || d.stid == intfSliceTypId { - var v2 []interface{} - d.decode(&v2) - rvn = reflect.ValueOf(&v2).Elem() - if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice { - rvn2 := reflect.New(reflectArrayOf(rvn.Len(), intfTyp)).Elem() - reflect.Copy(rvn2, rvn) - rvn = rvn2 - } + if f.d.h.SliceType == nil { + var m2 []interface{} + v = &m2 } else { - if d.str { - rvn = reflect.New(d.h.SliceType) - d.decode(rv2i(rvn)) - rvn = rvn.Elem() - } else { - rvn = reflect.New(d.h.SliceType).Elem() - d.decodeValue(rvn, nil, true) - } + rvn = reflect.New(f.d.h.SliceType).Elem() + useRvn = true } case valueTypeExt: - var v interface{} - tag, bytes := n.u, n.l // calling decode below might taint the values - if bytes == nil { - d.decode(&v) - } - bfn := d.h.getExtForTag(tag) + re := v.(*RawExt) + var bfn func(reflect.Value, []byte) error + rvn, bfn = f.d.h.getDecodeExtForTag(re.Tag) if bfn == nil { - var re RawExt - re.Tag = tag - re.Data = detachZeroCopyBytes(d.bytes, nil, bytes) - re.Value = v - rvn = reflect.ValueOf(&re).Elem() - } else { - rvnA := reflect.New(bfn.rt) - if bytes != nil { - bfn.ext.ReadExt(rv2i(rvnA), bytes) - } else { - bfn.ext.UpdateExt(rv2i(rvnA), v) - } - rvn = rvnA.Elem() - } - case valueTypeNil: - // no-op - case valueTypeInt: - rvn = n.ri() - case valueTypeUint: - rvn = n.ru() - case valueTypeFloat: - rvn = n.rf() - case valueTypeBool: - rvn = n.rb() - case valueTypeString, valueTypeSymbol: - rvn = n.rs() - case valueTypeBytes: - rvn = n.rl() - case valueTypeTime: - rvn = n.rt() - default: - panicv.errorf("kInterfaceNaked: unexpected valueType: %d", n.v) - } - return -} - -func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) { - // Note: - // A consequence of how kInterface works, is that - // if an interface already contains something, we try - // to decode into what was there before. - // We do not replace with a generic value (as got from decodeNaked). - - // every interface passed here MUST be settable. 
- var rvn reflect.Value - if rv.IsNil() || d.h.InterfaceReset { - // check if mapping to a type: if so, initialize it and move on - rvn = d.h.intf2impl(f.ti.rtid) - if rvn.IsValid() { - rv.Set(rvn) - } else { - rvn = d.kInterfaceNaked(f) - if rvn.IsValid() { - rv.Set(rvn) - } else if d.h.InterfaceReset { - // reset to zero value based on current type in there. - rv.Set(reflect.Zero(rv.Elem().Type())) - } - return + rvn = reflect.ValueOf(*re) + } else if fnerr := bfn(rvn, re.Data); fnerr != nil { + panic(fnerr) } - } else { - // now we have a non-nil interface value, meaning it contains a type - rvn = rv.Elem() - } - if d.d.TryDecodeAsNil() { - rv.Set(reflect.Zero(rvn.Type())) + rv.Set(rvn) return } - - // Note: interface{} is settable, but underlying type may not be. - // Consequently, we MAY have to create a decodable value out of the underlying value, - // decode into it, and reset the interface itself. - // fmt.Printf(">>>> kInterface: rvn type: %v, rv type: %v\n", rvn.Type(), rv.Type()) - - rvn2, canDecode := isDecodeable(rvn) - if canDecode { - d.decodeValue(rvn2, nil, true) - return + if decodeFurther { + if useRvn { + f.d.decodeValue(rvn) + } else if v != nil { + // this v is a pointer, so we need to dereference it when done + f.d.decode(v) + rvn = reflect.ValueOf(v).Elem() + useRvn = true + } } - - rvn2 = reflect.New(rvn.Type()).Elem() - rvn2.Set(rvn) - d.decodeValue(rvn2, nil, true) - rv.Set(rvn2) -} - -func decStructFieldKey(dd decDriver, keyType valueType, b *[decScratchByteArrayLen]byte) (rvkencname []byte) { - // use if-else-if, not switch (which compiles to binary-search) - // since keyType is typically valueTypeString, branch prediction is pretty good. - - if keyType == valueTypeString { - rvkencname = dd.DecodeStringAsBytes() - } else if keyType == valueTypeInt { - rvkencname = strconv.AppendInt(b[:0], dd.DecodeInt64(), 10) - } else if keyType == valueTypeUint { - rvkencname = strconv.AppendUint(b[:0], dd.DecodeUint64(), 10) - } else if keyType == valueTypeFloat { - rvkencname = strconv.AppendFloat(b[:0], dd.DecodeFloat64(), 'f', -1, 64) - } else { - rvkencname = dd.DecodeStringAsBytes() + if useRvn { + rv.Set(rvn) + } else if v != nil { + rv.Set(reflect.ValueOf(v)) } - return rvkencname } -func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { +func (f *decFnInfo) kStruct(rv reflect.Value) { fti := f.ti - dd := d.d - elemsep := d.esep - sfn := structFieldNode{v: rv, update: true} - ctyp := dd.ContainerType() - var mf MissingFielder - if fti.mf { - mf = rv2i(rv).(MissingFielder) - } else if fti.mfp { - mf = rv2i(rv.Addr()).(MissingFielder) - } - if ctyp == valueTypeMap { - containerLen := dd.ReadMapStart() + if currEncodedType := f.dd.currentEncodedType(); currEncodedType == valueTypeMap { + containerLen := f.dd.readMapLen() if containerLen == 0 { - dd.ReadMapEnd() return } - d.depthIncr() - tisfi := fti.sfiSort - hasLen := containerLen >= 0 - - var rvkencname []byte - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if elemsep { - dd.ReadMapElemKey() - } - rvkencname = decStructFieldKey(dd, fti.keyType, &d.b) - if elemsep { - dd.ReadMapElemValue() - } + tisfi := fti.sfi + for j := 0; j < containerLen; j++ { + // var rvkencname string + // ddecode(&rvkencname) + f.dd.initReadNext() + rvkencname := f.dd.decodeString() + // rvksi := ti.getForEncName(rvkencname) if k := fti.indexForEncName(rvkencname); k > -1 { - si := tisfi[k] - if dd.TryDecodeAsNil() { - si.setToZeroValue(rv) + sfik := tisfi[k] + if sfik.i != -1 { + 
f.d.decodeValue(rv.Field(int(sfik.i))) } else { - d.decodeValue(sfn.field(si), nil, true) - } - } else if mf != nil { - // store rvkencname in new []byte, as it previously shares Decoder.b, which is used in decode - name2 := rvkencname - rvkencname = make([]byte, len(rvkencname)) - copy(rvkencname, name2) - - var f interface{} - // xdebugf("kStruct: mf != nil: before decode: rvkencname: %s", rvkencname) - d.decode(&f) - // xdebugf("kStruct: mf != nil: after decode: rvkencname: %s", rvkencname) - if !mf.CodecMissingField(rvkencname, f) && d.h.ErrorIfNoField { - d.errorf("no matching struct field found when decoding stream map with key: %s ", - stringView(rvkencname)) + f.d.decEmbeddedField(rv, sfik.is) } + // f.d.decodeValue(ti.field(k, rv)) } else { - d.structFieldNotFound(-1, stringView(rvkencname)) + if f.d.h.ErrorIfNoField { + decErr("No matching struct field found when decoding stream map with key: %v", + rvkencname) + } else { + var nilintf0 interface{} + f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) + } } - // keepAlive4StringView(rvkencnameB) // not needed, as reference is outside loop } - dd.ReadMapEnd() - d.depthDecr() - } else if ctyp == valueTypeArray { - containerLen := dd.ReadArrayStart() + } else if currEncodedType == valueTypeArray { + containerLen := f.dd.readArrayLen() if containerLen == 0 { - dd.ReadArrayEnd() return } - d.depthIncr() - // Not much gain from doing it two ways for array. - // Arrays are not used as much for structs. - hasLen := containerLen >= 0 - var checkbreak bool - for j, si := range fti.sfiSrc { - if hasLen && j == containerLen { - break - } - if !hasLen && dd.CheckBreak() { - checkbreak = true + for j, si := range fti.sfip { + if j == containerLen { break } - if elemsep { - dd.ReadArrayElem() - } - if dd.TryDecodeAsNil() { - si.setToZeroValue(rv) + if si.i != -1 { + f.d.decodeValue(rv.Field(int(si.i))) } else { - d.decodeValue(sfn.field(si), nil, true) + f.d.decEmbeddedField(rv, si.is) } } - if (hasLen && containerLen > len(fti.sfiSrc)) || (!hasLen && !checkbreak) { + if containerLen > len(fti.sfip) { // read remaining values and throw away - for j := len(fti.sfiSrc); ; j++ { - if (hasLen && j == containerLen) || (!hasLen && dd.CheckBreak()) { - break - } - if elemsep { - dd.ReadArrayElem() - } - d.structFieldNotFound(j, "") + for j := len(fti.sfip); j < containerLen; j++ { + var nilintf0 interface{} + f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) } } - dd.ReadArrayEnd() - d.depthDecr() } else { - d.errorstr(errstrOnlyMapOrArrayCanDecodeIntoStruct) - return + decErr("Only encoded map or array can be decoded into a struct. (valueType: %x)", + currEncodedType) } } -func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { +func (f *decFnInfo) kSlice(rv reflect.Value) { // A slice can be set from a map or array in stream. - // This way, the order can be kept (as order is lost with map). 
- ti := f.ti - if f.seq == seqTypeChan && ti.chandir&uint8(reflect.SendDir) == 0 { - d.errorf("receive-only channel cannot be decoded") - } - dd := d.d - rtelem0 := ti.elem - ctyp := dd.ContainerType() - if ctyp == valueTypeBytes || ctyp == valueTypeString { - // you can only decode bytes or string in the stream into a slice or array of bytes - if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) { - d.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) - } - if f.seq == seqTypeChan { - bs2 := dd.DecodeBytes(nil, true) - irv := rv2i(rv) - ch, ok := irv.(chan<- byte) - if !ok { - ch = irv.(chan byte) - } - for _, b := range bs2 { - ch <- b - } - } else { - rvbs := rv.Bytes() - bs2 := dd.DecodeBytes(rvbs, false) - // if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) { - if !(len(bs2) > 0 && len(bs2) == len(rvbs) && &bs2[0] == &rvbs[0]) { - if rv.CanSet() { - rv.SetBytes(bs2) - } else if len(rvbs) > 0 && len(bs2) > 0 { - copy(rvbs, bs2) - } - } - } - return - } - - // array := f.seq == seqTypeChan + currEncodedType := f.dd.currentEncodedType() - slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) - - // an array can never return a nil slice. so no need to check f.array here. - if containerLenS == 0 { - if rv.CanSet() { - if f.seq == seqTypeSlice { - if rv.IsNil() { - rv.Set(reflect.MakeSlice(ti.rt, 0, 0)) - } else { - rv.SetLen(0) - } - } else if f.seq == seqTypeChan { - if rv.IsNil() { - rv.Set(reflect.MakeChan(ti.rt, 0)) - } + switch currEncodedType { + case valueTypeBytes, valueTypeString: + if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { + if bs2, changed2 := f.dd.decodeBytes(rv.Bytes()); changed2 { + rv.SetBytes(bs2) } - } - slh.End() - return - } - - d.depthIncr() - - rtelem0Size := int(rtelem0.Size()) - rtElem0Kind := rtelem0.Kind() - rtelem0Mut := !isImmutableKind(rtElem0Kind) - rtelem := rtelem0 - rtelemkind := rtelem.Kind() - for rtelemkind == reflect.Ptr { - rtelem = rtelem.Elem() - rtelemkind = rtelem.Kind() - } - - var fn *codecFn - - var rvCanset = rv.CanSet() - var rvChanged bool - var rv0 = rv - var rv9 reflect.Value - - rvlen := rv.Len() - rvcap := rv.Cap() - hasLen := containerLenS > 0 - if hasLen && f.seq == seqTypeSlice { - if containerLenS > rvcap { - oldRvlenGtZero := rvlen > 0 - rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(rtelem0.Size())) - if rvlen <= rvcap { - if rvCanset { - rv.SetLen(rvlen) - } - } else if rvCanset { - rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) - rvcap = rvlen - rvChanged = true - } else { - d.errorf("cannot decode into non-settable slice") - } - if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) { - reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap) - } - } else if containerLenS != rvlen { - rvlen = containerLenS - if rvCanset { - rv.SetLen(rvlen) - } - // else { - // rv = rv.Slice(0, rvlen) - // rvChanged = true - // d.errorf("cannot decode into non-settable slice") - // } - } - } - - // consider creating new element once, and just decoding into it. 
- var rtelem0Zero reflect.Value - var rtelem0ZeroValid bool - var decodeAsNil bool - var j int - - for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && (f.seq == seqTypeSlice || f.seq == seqTypeChan) && rv.IsNil() { - if hasLen { - rvlen = decInferLen(containerLenS, d.h.MaxInitLen, rtelem0Size) - } else if f.seq == seqTypeSlice { - rvlen = decDefSliceCap - } else { - rvlen = decDefChanCap - } - if rvCanset { - if f.seq == seqTypeSlice { - rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) - rvChanged = true - } else { // chan - rv = reflect.MakeChan(ti.rt, rvlen) - rvChanged = true - } - } else { - d.errorf("cannot decode into non-settable slice") - } - } - slh.ElemContainerState(j) - decodeAsNil = dd.TryDecodeAsNil() - if f.seq == seqTypeChan { - if decodeAsNil { - rv.Send(reflect.Zero(rtelem0)) - continue - } - if rtelem0Mut || !rv9.IsValid() { // || (rtElem0Kind == reflect.Ptr && rv9.IsNil()) { - rv9 = reflect.New(rtelem0).Elem() - } - if fn == nil { - fn = d.h.fn(rtelem, true, true) - } - d.decodeValue(rv9, fn, true) - rv.Send(rv9) - } else { - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= rvlen { - if f.seq == seqTypeArray { - d.arrayCannotExpand(rvlen, j+1) - decodeIntoBlank = true - } else { // if f.seq == seqTypeSlice - // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // append logic + varargs - var rvcap2 int - var rvErrmsg2 string - rv9, rvcap2, rvChanged, rvErrmsg2 = - expandSliceRV(rv, ti.rt, rvCanset, rtelem0Size, 1, rvlen, rvcap) - if rvErrmsg2 != "" { - d.errorf(rvErrmsg2) - } - rvlen++ - if rvChanged { - rv = rv9 - rvcap = rvcap2 - } - } - } - if decodeIntoBlank { - if !decodeAsNil { - d.swallow() - } - } else { - rv9 = rv.Index(j) - if d.h.SliceElementReset || decodeAsNil { - if !rtelem0ZeroValid { - rtelem0ZeroValid = true - rtelem0Zero = reflect.Zero(rtelem0) - } - rv9.Set(rtelem0Zero) - if decodeAsNil { - continue - } - } - - if fn == nil { - fn = d.h.fn(rtelem, true, true) - } - d.decodeValue(rv9, fn, true) - } - } - } - if f.seq == seqTypeSlice { - if j < rvlen { - if rv.CanSet() { - rv.SetLen(j) - } else if rvCanset { - rv = rv.Slice(0, j) - rvChanged = true - } // else { d.errorf("kSlice: cannot change non-settable slice") } - rvlen = j - } else if j == 0 && rv.IsNil() { - if rvCanset { - rv = reflect.MakeSlice(ti.rt, 0, 0) - rvChanged = true - } // else { d.errorf("kSlice: cannot change non-settable slice") } - } - } - slh.End() - - if rvChanged { // infers rvCanset=true, so it can be reset - rv0.Set(rv) - } - - d.depthDecr() -} - -// func (d *Decoder) kArray(f *codecFnInfo, rv reflect.Value) { -// // d.decodeValueFn(rv.Slice(0, rv.Len())) -// f.kSlice(rv.Slice(0, rv.Len())) -// } - -func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { - dd := d.d - containerLen := dd.ReadMapStart() - elemsep := d.esep - ti := f.ti - if rv.IsNil() { - rvlen := decInferLen(containerLen, d.h.MaxInitLen, int(ti.key.Size()+ti.elem.Size())) - rv.Set(makeMapReflect(ti.rt, rvlen)) - } - - if containerLen == 0 { - dd.ReadMapEnd() - return - } - - d.depthIncr() - - ktype, vtype := ti.key, ti.elem - ktypeId := rt2id(ktype) - vtypeKind := vtype.Kind() - - var keyFn, valFn *codecFn - var ktypeLo, vtypeLo reflect.Type - - for ktypeLo = ktype; ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { - } - - for vtypeLo = vtype; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() { - } - - var mapGet, mapSet bool - rvvImmut := isImmutableKind(vtypeKind) - if !d.h.MapValueReset { - // if pointer, mapGet = 
true - // if interface, mapGet = true if !DecodeNakedAlways (else false) - // if builtin, mapGet = false - // else mapGet = true - if vtypeKind == reflect.Ptr { - mapGet = true - } else if vtypeKind == reflect.Interface { - if !d.h.InterfaceReset { - mapGet = true - } - } else if !rvvImmut { - mapGet = true + return } } - var rvk, rvkp, rvv, rvz reflect.Value - rvkMut := !isImmutableKind(ktype.Kind()) // if ktype is immutable, then re-use the same rvk. - ktypeIsString := ktypeId == stringTypId - ktypeIsIntf := ktypeId == intfTypId - hasLen := containerLen > 0 - var kstrbs []byte - - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if rvkMut || !rvkp.IsValid() { - rvkp = reflect.New(ktype) - rvk = rvkp.Elem() - } - if elemsep { - dd.ReadMapElemKey() - } - // if false && dd.TryDecodeAsNil() { // nil cannot be a map key, so disregard this block - // // Previously, if a nil key, we just ignored the mapped value and continued. - // // However, that makes the result of encoding and then decoding map[intf]intf{nil:nil} - // // to be an empty map. - // // Instead, we treat a nil key as the zero value of the type. - // rvk.Set(reflect.Zero(ktype)) - // } else if ktypeIsString { - if ktypeIsString { - kstrbs = dd.DecodeStringAsBytes() - rvk.SetString(stringView(kstrbs)) - // NOTE: if doing an insert, you MUST use a real string (not stringview) - } else { - if keyFn == nil { - keyFn = d.h.fn(ktypeLo, true, true) - } - d.decodeValue(rvk, keyFn, true) - } - // special case if a byte array. - if ktypeIsIntf { - if rvk2 := rvk.Elem(); rvk2.IsValid() { - if rvk2.Type() == uint8SliceTyp { - rvk = reflect.ValueOf(d.string(rvk2.Bytes())) - } else { - rvk = rvk2 - } - } - } - - if elemsep { - dd.ReadMapElemValue() - } - - // Brittle, but OK per TryDecodeAsNil() contract. - // i.e. TryDecodeAsNil never shares slices with other decDriver procedures - if dd.TryDecodeAsNil() { - if ktypeIsString { - rvk.SetString(d.string(kstrbs)) - } - if d.h.DeleteOnNilMapValue { - rv.SetMapIndex(rvk, reflect.Value{}) - } else { - rv.SetMapIndex(rvk, reflect.Zero(vtype)) - } - continue - } - - mapSet = true // set to false if u do a get, and its a non-nil pointer - if mapGet { - // mapGet true only in case where kind=Ptr|Interface or kind is otherwise mutable. - rvv = rv.MapIndex(rvk) - if !rvv.IsValid() { - rvv = reflect.New(vtype).Elem() - } else if vtypeKind == reflect.Ptr { - if rvv.IsNil() { - rvv = reflect.New(vtype).Elem() - } else { - mapSet = false - } - } else if vtypeKind == reflect.Interface { - // not addressable, and thus not settable. - // e MUST create a settable/addressable variant - rvv2 := reflect.New(rvv.Type()).Elem() - if !rvv.IsNil() { - rvv2.Set(rvv) - } - rvv = rvv2 - } - // else it is ~mutable, and we can just decode into it directly - } else if rvvImmut { - if !rvz.IsValid() { - rvz = reflect.New(vtype).Elem() - } - rvv = rvz - } else { - rvv = reflect.New(vtype).Elem() - } - - // We MUST be done with the stringview of the key, before decoding the value - // so that we don't bastardize the reused byte array. 
- if mapSet && ktypeIsString { - rvk.SetString(d.string(kstrbs)) - } - if valFn == nil { - valFn = d.h.fn(vtypeLo, true, true) - } - d.decodeValue(rvv, valFn, true) - // d.decodeValueFn(rvv, valFn) - if mapSet { - rv.SetMapIndex(rvk, rvv) + if shortCircuitReflectToFastPath && rv.CanAddr() { + switch f.ti.rtid { + case intfSliceTypId: + f.d.decSliceIntf(rv.Addr().Interface().(*[]interface{}), currEncodedType, f.array) + return + case uint64SliceTypId: + f.d.decSliceUint64(rv.Addr().Interface().(*[]uint64), currEncodedType, f.array) + return + case int64SliceTypId: + f.d.decSliceInt64(rv.Addr().Interface().(*[]int64), currEncodedType, f.array) + return + case strSliceTypId: + f.d.decSliceStr(rv.Addr().Interface().(*[]string), currEncodedType, f.array) + return } - // if ktypeIsString { - // // keepAlive4StringView(kstrbs) // not needed, as reference is outside loop - // } } - dd.ReadMapEnd() + containerLen, containerLenS := decContLens(f.dd, currEncodedType) - d.depthDecr() -} - -// decNaked is used to keep track of the primitives decoded. -// Without it, we would have to decode each primitive and wrap it -// in an interface{}, causing an allocation. -// In this model, the primitives are decoded in a "pseudo-atomic" fashion, -// so we can rest assured that no other decoding happens while these -// primitives are being decoded. -// -// maps and arrays are not handled by this mechanism. -// However, RawExt is, and we accommodate for extensions that decode -// RawExt from DecodeNaked, but need to decode the value subsequently. -// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat. -// -// However, decNaked also keeps some arrays of default maps and slices -// used in DecodeNaked. This way, we can get a pointer to it -// without causing a new heap allocation. -// -// kInterfaceNaked will ensure that there is no allocation for the common -// uses. - -type decNaked struct { - // r RawExt // used for RawExt, uint, []byte. - - // primitives below - u uint64 - i int64 - f float64 - l []byte - s string - - // ---- cpu cache line boundary? - t time.Time - b bool - - // state - v valueType - _ [6]bool // padding - - // ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above - // - // _ [3]uint64 // padding -} - -// func (n *decNaked) init() { -// n.ru = reflect.ValueOf(&n.u).Elem() -// n.ri = reflect.ValueOf(&n.i).Elem() -// n.rf = reflect.ValueOf(&n.f).Elem() -// n.rl = reflect.ValueOf(&n.l).Elem() -// n.rs = reflect.ValueOf(&n.s).Elem() -// n.rt = reflect.ValueOf(&n.t).Elem() -// n.rb = reflect.ValueOf(&n.b).Elem() -// // n.rr[] = reflect.ValueOf(&n.) -// } - -// type decNakedPooler struct { -// n *decNaked -// nsp *sync.Pool -// } - -// // naked must be called before each call to .DecodeNaked, as they will use it. -// func (d *decNakedPooler) naked() *decNaked { -// if d.n == nil { -// // consider one of: -// // - get from sync.Pool (if GC is frequent, there's no value here) -// // - new alloc (safest. only init'ed if it a naked decode will be done) -// // - field in Decoder (makes the Decoder struct very big) -// // To support using a decoder where a DecodeNaked is not needed, -// // we prefer #1 or #2. 
-// // d.n = new(decNaked) // &d.nv // new(decNaked) // grab from a sync.Pool -// // d.n.init() -// var v interface{} -// d.nsp, v = pool.decNaked() -// d.n = v.(*decNaked) -// } -// return d.n -// } - -// func (d *decNakedPooler) end() { -// if d.n != nil { -// // if n != nil, then nsp != nil (they are always set together) -// d.nsp.Put(d.n) -// d.n, d.nsp = nil, nil -// } -// } - -// type rtid2rv struct { -// rtid uintptr -// rv reflect.Value -// } - -// -------------- - -type decReaderSwitch struct { - rb bytesDecReader - // ---- cpu cache line boundary? - ri *ioDecReader - bi *bufioDecReader - - mtr, str bool // whether maptype or slicetype are known types - - be bool // is binary encoding - js bool // is json handle - jsms bool // is json handle, and MapKeyAsString - esep bool // has elem separators - - // typ entryType - bytes bool // is bytes reader - bufio bool // is this a bufioDecReader? -} - -// numread, track and stopTrack are always inlined, as they just check int fields, etc. - -/* -func (z *decReaderSwitch) numread() int { - switch z.typ { - case entryTypeBytes: - return z.rb.numread() - case entryTypeIo: - return z.ri.numread() - default: - return z.bi.numread() - } -} -func (z *decReaderSwitch) track() { - switch z.typ { - case entryTypeBytes: - z.rb.track() - case entryTypeIo: - z.ri.track() - default: - z.bi.track() - } -} -func (z *decReaderSwitch) stopTrack() []byte { - switch z.typ { - case entryTypeBytes: - return z.rb.stopTrack() - case entryTypeIo: - return z.ri.stopTrack() - default: - return z.bi.stopTrack() - } -} - -func (z *decReaderSwitch) unreadn1() { - switch z.typ { - case entryTypeBytes: - z.rb.unreadn1() - case entryTypeIo: - z.ri.unreadn1() - default: - z.bi.unreadn1() - } -} -func (z *decReaderSwitch) readx(n int) []byte { - switch z.typ { - case entryTypeBytes: - return z.rb.readx(n) - case entryTypeIo: - return z.ri.readx(n) - default: - return z.bi.readx(n) - } -} -func (z *decReaderSwitch) readb(s []byte) { - switch z.typ { - case entryTypeBytes: - z.rb.readb(s) - case entryTypeIo: - z.ri.readb(s) - default: - z.bi.readb(s) - } -} -func (z *decReaderSwitch) readn1() uint8 { - switch z.typ { - case entryTypeBytes: - return z.rb.readn1() - case entryTypeIo: - return z.ri.readn1() - default: - return z.bi.readn1() - } -} -func (z *decReaderSwitch) skip(accept *bitset256) (token byte) { - switch z.typ { - case entryTypeBytes: - return z.rb.skip(accept) - case entryTypeIo: - return z.ri.skip(accept) - default: - return z.bi.skip(accept) - } -} -func (z *decReaderSwitch) readTo(in []byte, accept *bitset256) (out []byte) { - switch z.typ { - case entryTypeBytes: - return z.rb.readTo(in, accept) - case entryTypeIo: - return z.ri.readTo(in, accept) - default: - return z.bi.readTo(in, accept) - } -} -func (z *decReaderSwitch) readUntil(in []byte, stop byte) (out []byte) { - switch z.typ { - case entryTypeBytes: - return z.rb.readUntil(in, stop) - case entryTypeIo: - return z.ri.readUntil(in, stop) - default: - return z.bi.readUntil(in, stop) - } -} - -*/ - -// the if/else-if/else block is expensive to inline. -// Each node of this construct costs a lot and dominates the budget. -// Best to only do an if fast-path else block (so fast-path is inlined). -// This is irrespective of inlineExtraCallCost set in $GOROOT/src/cmd/compile/internal/gc/inl.go -// -// In decReaderSwitch methods below, we delegate all IO functions into their own methods. -// This allows for the inlining of the common path when z.bytes=true. 
-// Go 1.12+ supports inlining methods with up to 1 inlined function (or 2 if no other constructs). - -func (z *decReaderSwitch) numread() uint { - if z.bytes { - return z.rb.numread() - } else if z.bufio { - return z.bi.numread() - } else { - return z.ri.numread() - } -} -func (z *decReaderSwitch) track() { - if z.bytes { - z.rb.track() - } else if z.bufio { - z.bi.track() - } else { - z.ri.track() - } -} -func (z *decReaderSwitch) stopTrack() []byte { - if z.bytes { - return z.rb.stopTrack() - } else if z.bufio { - return z.bi.stopTrack() - } else { - return z.ri.stopTrack() - } -} - -// func (z *decReaderSwitch) unreadn1() { -// if z.bytes { -// z.rb.unreadn1() -// } else { -// z.unreadn1IO() -// } -// } -// func (z *decReaderSwitch) unreadn1IO() { -// if z.bufio { -// z.bi.unreadn1() -// } else { -// z.ri.unreadn1() -// } -// } - -func (z *decReaderSwitch) unreadn1() { - if z.bytes { - z.rb.unreadn1() - } else if z.bufio { - z.bi.unreadn1() - } else { - z.ri.unreadn1() // not inlined - } -} - -func (z *decReaderSwitch) readx(n uint) []byte { - if z.bytes { - return z.rb.readx(n) - } - return z.readxIO(n) -} -func (z *decReaderSwitch) readxIO(n uint) []byte { - if z.bufio { - return z.bi.readx(n) - } - return z.ri.readx(n) -} - -func (z *decReaderSwitch) readb(s []byte) { - if z.bytes { - z.rb.readb(s) - } else { - z.readbIO(s) - } -} - -//go:noinline - fallback for io, ensures z.bytes path is inlined -func (z *decReaderSwitch) readbIO(s []byte) { - if z.bufio { - z.bi.readb(s) - } else { - z.ri.readb(s) - } -} - -func (z *decReaderSwitch) readn1() uint8 { - if z.bytes { - return z.rb.readn1() - } - return z.readn1IO() -} -func (z *decReaderSwitch) readn1IO() uint8 { - if z.bufio { - return z.bi.readn1() - } - return z.ri.readn1() -} + // an array can never return a nil slice. so no need to check f.array here. -func (z *decReaderSwitch) skip(accept *bitset256) (token byte) { - if z.bytes { - return z.rb.skip(accept) - } - return z.skipIO(accept) -} -func (z *decReaderSwitch) skipIO(accept *bitset256) (token byte) { - if z.bufio { - return z.bi.skip(accept) + if rv.IsNil() { + rv.Set(reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS)) } - return z.ri.skip(accept) -} -func (z *decReaderSwitch) readTo(in []byte, accept *bitset256) (out []byte) { - if z.bytes { - return z.rb.readToNoInput(accept) // z.rb.readTo(in, accept) + if containerLen == 0 { + return } - return z.readToIO(in, accept) -} -//go:noinline - fallback for io, ensures z.bytes path is inlined -func (z *decReaderSwitch) readToIO(in []byte, accept *bitset256) (out []byte) { - if z.bufio { - return z.bi.readTo(in, accept) - } - return z.ri.readTo(in, accept) -} -func (z *decReaderSwitch) readUntil(in []byte, stop byte) (out []byte) { - if z.bytes { - return z.rb.readUntilNoInput(stop) + if rvcap, rvlen := rv.Len(), rv.Cap(); containerLenS > rvcap { + if f.array { // !rv.CanSet() + decErr(msgDecCannotExpandArr, rvcap, containerLenS) + } + rvn := reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS) + if rvlen > 0 { + reflect.Copy(rvn, rv) + } + rv.Set(rvn) + } else if containerLenS > rvlen { + rv.SetLen(containerLenS) } - return z.readUntilIO(in, stop) -} -func (z *decReaderSwitch) readUntilIO(in []byte, stop byte) (out []byte) { - if z.bufio { - return z.bi.readUntil(in, stop) + for j := 0; j < containerLenS; j++ { + f.d.decodeValue(rv.Index(j)) } - return z.ri.readUntil(in, stop) } -// Decoder reads and decodes an object from an input stream in a supported format. -// -// Decoder is NOT safe for concurrent use i.e. 
a Decoder cannot be used -// concurrently in multiple goroutines. -// -// However, as Decoder could be allocation heavy to initialize, a Reset method is provided -// so its state can be reused to decode new input streams repeatedly. -// This is the idiomatic way to use. -type Decoder struct { - panicHdl - // hopefully, reduce derefencing cost by laying the decReader inside the Decoder. - // Try to put things that go together to fit within a cache line (8 words). - - d decDriver - - // NOTE: Decoder shouldn't call it's read methods, - // as the handler MAY need to do some coordination. - r *decReaderSwitch +func (f *decFnInfo) kArray(rv reflect.Value) { + // f.d.decodeValue(rv.Slice(0, rv.Len())) + f.kSlice(rv.Slice(0, rv.Len())) +} - // bi *bufioDecReader - // cache the mapTypeId and sliceTypeId for faster comparisons - mtid uintptr - stid uintptr +func (f *decFnInfo) kMap(rv reflect.Value) { + if shortCircuitReflectToFastPath && rv.CanAddr() { + switch f.ti.rtid { + case mapStrIntfTypId: + f.d.decMapStrIntf(rv.Addr().Interface().(*map[string]interface{})) + return + case mapIntfIntfTypId: + f.d.decMapIntfIntf(rv.Addr().Interface().(*map[interface{}]interface{})) + return + case mapInt64IntfTypId: + f.d.decMapInt64Intf(rv.Addr().Interface().(*map[int64]interface{})) + return + case mapUint64IntfTypId: + f.d.decMapUint64Intf(rv.Addr().Interface().(*map[uint64]interface{})) + return + } + } - hh Handle - h *BasicHandle + containerLen := f.dd.readMapLen() - // ---- cpu cache line boundary? - decReaderSwitch + if rv.IsNil() { + rv.Set(reflect.MakeMap(f.ti.rt)) + } - // ---- cpu cache line boundary? - n decNaked + if containerLen == 0 { + return + } - // cr containerStateRecv - err error + ktype, vtype := f.ti.rt.Key(), f.ti.rt.Elem() + ktypeId := reflect.ValueOf(ktype).Pointer() + for j := 0; j < containerLen; j++ { + rvk := reflect.New(ktype).Elem() + f.d.decodeValue(rvk) - depth int16 - maxdepth int16 + // special case if a byte array. + // if ktype == intfTyp { + if ktypeId == intfTypId { + rvk = rvk.Elem() + if rvk.Type() == uint8SliceTyp { + rvk = reflect.ValueOf(string(rvk.Bytes())) + } + } + rvv := rv.MapIndex(rvk) + if !rvv.IsValid() || !rvv.CanSet() { + rvv = reflect.New(vtype).Elem() + } - _ [4]uint8 // padding + f.d.decodeValue(rvv) + rv.SetMapIndex(rvk, rvv) + } +} - is map[string]string // used for interning strings +// ---------------------------------------- - // ---- cpu cache line boundary? - b [decScratchByteArrayLen]byte // scratch buffer, used by Decoder and xxxEncDrivers +type decFn struct { + i *decFnInfo + f func(*decFnInfo, reflect.Value) +} - // padding - false sharing help // modify 232 if Decoder struct changes. - // _ [cacheLineSize - 232%cacheLineSize]byte +// A Decoder reads and decodes an object from an input stream in the codec format. +type Decoder struct { + r decReader + d decDriver + h *BasicHandle + f map[uintptr]decFn + x []uintptr + s []decFn } // NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. // -// For efficiency, Users are encouraged to configure ReaderBufferSize on the handle -// OR pass in a memory buffered reader (eg bufio.Reader, bytes.Buffer). +// For efficiency, Users are encouraged to pass in a memory buffered writer +// (eg bufio.Reader, bytes.Buffer). 
func NewDecoder(r io.Reader, h Handle) *Decoder { - d := newDecoder(h) - d.Reset(r) - return d + z := ioDecReader{ + r: r, + } + z.br, _ = r.(io.ByteReader) + return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} } // NewDecoderBytes returns a Decoder which efficiently decodes directly // from a byte slice with zero copying. func NewDecoderBytes(in []byte, h Handle) *Decoder { - d := newDecoder(h) - d.ResetBytes(in) - return d -} - -// var defaultDecNaked decNaked - -func newDecoder(h Handle) *Decoder { - d := &Decoder{h: basicHandle(h), err: errDecoderNotInitialized} - d.bytes = true - if useFinalizers { - runtime.SetFinalizer(d, (*Decoder).finalize) - // xdebugf(">>>> new(Decoder) with finalizer") - } - d.r = &d.decReaderSwitch - d.hh = h - d.be = h.isBinary() - // NOTE: do not initialize d.n here. It is lazily initialized in d.naked() - var jh *JsonHandle - jh, d.js = h.(*JsonHandle) - if d.js { - d.jsms = jh.MapKeyAsString - } - d.esep = d.hh.hasElemSeparators() - if d.h.InternString { - d.is = make(map[string]string, 32) - } - d.d = h.newDecDriver(d) - // d.cr, _ = d.d.(containerStateRecv) - return d -} - -func (d *Decoder) resetCommon() { - // d.r = &d.decReaderSwitch - d.d.reset() - d.err = nil - d.depth = 0 - d.maxdepth = d.h.MaxDepth - if d.maxdepth <= 0 { - d.maxdepth = decDefMaxDepth - } - // reset all things which were cached from the Handle, but could change - d.mtid, d.stid = 0, 0 - d.mtr, d.str = false, false - if d.h.MapType != nil { - d.mtid = rt2id(d.h.MapType) - d.mtr = fastpathAV.index(d.mtid) != -1 - } - if d.h.SliceType != nil { - d.stid = rt2id(d.h.SliceType) - d.str = fastpathAV.index(d.stid) != -1 - } -} - -// Reset the Decoder with a new Reader to decode from, -// clearing all state from last run(s). -func (d *Decoder) Reset(r io.Reader) { - if r == nil { - return - } - d.bytes = false - // d.typ = entryTypeUnset - if d.h.ReaderBufferSize > 0 { - if d.bi == nil { - d.bi = new(bufioDecReader) - } - d.bi.reset(r, d.h.ReaderBufferSize) - // d.r = d.bi - // d.typ = entryTypeBufio - d.bufio = true - } else { - // d.ri.x = &d.b - // d.s = d.sa[:0] - if d.ri == nil { - d.ri = new(ioDecReader) - } - d.ri.reset(r) - // d.r = d.ri - // d.typ = entryTypeIo - d.bufio = false - } - d.resetCommon() -} - -// ResetBytes resets the Decoder with a new []byte to decode from, -// clearing all state from last run(s). -func (d *Decoder) ResetBytes(in []byte) { - if in == nil { - return + z := bytesDecReader{ + b: in, + a: len(in), } - d.bytes = true - d.bufio = false - // d.typ = entryTypeBytes - d.rb.reset(in) - // d.r = &d.rb - d.resetCommon() -} - -func (d *Decoder) naked() *decNaked { - return &d.n + return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} } // Decode decodes the stream from reader and stores the result in the @@ -2470,8 +609,7 @@ func (d *Decoder) naked() *decNaked { // - Else decode it based on its reflect.Kind // // There are some special rules when decoding into containers (slice/array/map/struct). -// Decode will typically use the stream contents to UPDATE the container i.e. the values -// in these containers will not be zero'ed before decoding. +// Decode will typically use the stream contents to UPDATE the container. // - A map can be decoded from a stream map, by updating matching keys. // - A slice can be decoded from a stream array, // by updating the first n elements, where n is length of the stream. 
@@ -2481,629 +619,430 @@ func (d *Decoder) naked() *decNaked { // - A struct can be decoded from a stream array, // by updating fields as they occur in the struct (by index). // -// This in-place update maintains consistency in the decoding philosophy (i.e. we ALWAYS update -// in place by default). However, the consequence of this is that values in slices or maps -// which are not zero'ed before hand, will have part of the prior values in place after decode -// if the stream doesn't contain an update for those parts. -// -// This in-place update can be disabled by configuring the MapValueReset and SliceElementReset -// decode options available on every handle. -// -// Furthermore, when decoding a stream map or array with length of 0 into a nil map or slice, +// When decoding a stream map or array with length of 0 into a nil map or slice, // we reset the destination map or slice to a zero-length value. // // However, when decoding a stream nil, we reset the destination container // to its "zero" value (e.g. nil for slice/map, etc). // -// Note: we allow nil values in the stream anywhere except for map keys. -// A nil value in the encoded stream where a map key is expected is treated as an error. func (d *Decoder) Decode(v interface{}) (err error) { - // tried to use closure, as runtime optimizes defer with no params. - // This seemed to be causing weird issues (like circular reference found, unexpected panic, etc). - // Also, see https://github.com/golang/go/issues/14939#issuecomment-417836139 - // defer func() { d.deferred(&err) }() - // { x, y := d, &err; defer func() { x.deferred(y) }() } - if d.err != nil { - return d.err - } - if recoverPanicToErr { - defer func() { - if x := recover(); x != nil { - panicValToErr(d, x, &d.err) - err = d.err - } - }() - } - - // defer d.deferred(&err) - d.mustDecode(v) - return -} - -// MustDecode is like Decode, but panics if unable to Decode. -// This provides insight to the code location that triggered the error. -func (d *Decoder) MustDecode(v interface{}) { - if d.err != nil { - panic(d.err) - } - d.mustDecode(v) -} - -// MustDecode is like Decode, but panics if unable to Decode. -// This provides insight to the code location that triggered the error. -func (d *Decoder) mustDecode(v interface{}) { - // TODO: Top-level: ensure that v is a pointer and not nil. - if d.d.TryDecodeAsNil() { - setZero(v) - return - } - if d.bi == nil { - d.decode(v) - return - } - - d.bi.calls++ + defer panicToErr(&err) d.decode(v) - // xprintf.(">>>>>>>> >>>>>>>> num decFns: %v\n", d.cf.sn) - d.bi.calls-- - if !d.h.ExplicitRelease && d.bi.calls == 0 { - d.bi.release() - } -} - -// func (d *Decoder) deferred(err1 *error) { -// if recoverPanicToErr { -// if x := recover(); x != nil { -// panicValToErr(d, x, err1) -// panicValToErr(d, x, &d.err) -// } -// } -// } - -//go:noinline -- as it is run by finalizer -func (d *Decoder) finalize() { - // xdebugf("finalizing Decoder") - d.Release() + return } -// Release releases shared (pooled) resources. -// -// It is important to call Release() when done with a Decoder, so those resources -// are released instantly for use by subsequently created Decoders. -// -// By default, Release() is automatically called unless the option ExplicitRelease is set. -func (d *Decoder) Release() { - if d.bi != nil { - d.bi.release() - } - // d.decNakedPooler.end() -} +func (d *Decoder) decode(iv interface{}) { + d.d.initReadNext() -// // this is not a smart swallow, as it allocates objects and does unnecessary work. 
-// func (d *Decoder) swallowViaHammer() { -// var blank interface{} -// d.decodeValueNoFn(reflect.ValueOf(&blank).Elem()) -// } + switch v := iv.(type) { + case nil: + decErr("Cannot decode into nil.") -func (d *Decoder) swallow() { - // smarter decode that just swallows the content - dd := d.d - if dd.TryDecodeAsNil() { - return - } - elemsep := d.esep - switch dd.ContainerType() { - case valueTypeMap: - containerLen := dd.ReadMapStart() - d.depthIncr() - hasLen := containerLen >= 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - // if clenGtEqualZero {if j >= containerLen {break} } else if dd.CheckBreak() {break} - if elemsep { - dd.ReadMapElemKey() - } - d.swallow() - if elemsep { - dd.ReadMapElemValue() - } - d.swallow() - } - dd.ReadMapEnd() - d.depthDecr() - case valueTypeArray: - containerLen := dd.ReadArrayStart() - d.depthIncr() - hasLen := containerLen >= 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if elemsep { - dd.ReadArrayElem() - } - d.swallow() - } - dd.ReadArrayEnd() - d.depthDecr() - case valueTypeBytes: - dd.DecodeBytes(d.b[:], true) - case valueTypeString: - dd.DecodeStringAsBytes() - default: - // these are all primitives, which we can get from decodeNaked - // if RawExt using Value, complete the processing. - n := d.naked() - dd.DecodeNaked() - if n.v == valueTypeExt && n.l == nil { - var v2 interface{} - d.decode(&v2) - } - } -} + case reflect.Value: + d.chkPtrValue(v) + d.decodeValue(v.Elem()) -func setZero(iv interface{}) { - if iv == nil || definitelyNil(iv) { - return - } - var canDecode bool - switch v := iv.(type) { case *string: - *v = "" + *v = d.d.decodeString() case *bool: - *v = false + *v = d.d.decodeBool() case *int: - *v = 0 + *v = int(d.d.decodeInt(intBitsize)) case *int8: - *v = 0 + *v = int8(d.d.decodeInt(8)) case *int16: - *v = 0 + *v = int16(d.d.decodeInt(16)) case *int32: - *v = 0 + *v = int32(d.d.decodeInt(32)) case *int64: - *v = 0 + *v = d.d.decodeInt(64) case *uint: - *v = 0 + *v = uint(d.d.decodeUint(uintBitsize)) case *uint8: - *v = 0 + *v = uint8(d.d.decodeUint(8)) case *uint16: - *v = 0 + *v = uint16(d.d.decodeUint(16)) case *uint32: - *v = 0 + *v = uint32(d.d.decodeUint(32)) case *uint64: - *v = 0 + *v = d.d.decodeUint(64) case *float32: - *v = 0 + *v = float32(d.d.decodeFloat(true)) case *float64: - *v = 0 - case *[]uint8: - *v = nil - case *Raw: - *v = nil - case *time.Time: - *v = time.Time{} - case reflect.Value: - if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { - v.Set(reflect.Zero(v.Type())) - } // TODO: else drain if chan, clear if map, set all to nil if slice??? + *v = d.d.decodeFloat(false) + case *[]byte: + *v, _ = d.d.decodeBytes(*v) + + case *[]interface{}: + d.decSliceIntf(v, valueTypeInvalid, false) + case *[]uint64: + d.decSliceUint64(v, valueTypeInvalid, false) + case *[]int64: + d.decSliceInt64(v, valueTypeInvalid, false) + case *[]string: + d.decSliceStr(v, valueTypeInvalid, false) + case *map[string]interface{}: + d.decMapStrIntf(v) + case *map[interface{}]interface{}: + d.decMapIntfIntf(v) + case *map[uint64]interface{}: + d.decMapUint64Intf(v) + case *map[int64]interface{}: + d.decMapInt64Intf(v) + + case *interface{}: + d.decodeValue(reflect.ValueOf(iv).Elem()) + default: - if !fastpathDecodeSetZeroTypeSwitch(iv) { - v := reflect.ValueOf(iv) - if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { - v.Set(reflect.Zero(v.Type())) - } // TODO: else drain if chan, clear if map, set all to nil if slice??? 
- } + rv := reflect.ValueOf(iv) + d.chkPtrValue(rv) + d.decodeValue(rv.Elem()) } } -func (d *Decoder) decode(iv interface{}) { - // a switch with only concrete types can be optimized. - // consequently, we deal with nil and interfaces outside the switch. +func (d *Decoder) decodeValue(rv reflect.Value) { + d.d.initReadNext() - if iv == nil { - d.errorstr(errstrCannotDecodeIntoNil) + if d.d.tryDecodeAsNil() { + // If value in stream is nil, set the dereferenced value to its "zero" value (if settable) + if rv.Kind() == reflect.Ptr { + if !rv.IsNil() { + rv.Set(reflect.Zero(rv.Type())) + } + return + } + // for rv.Kind() == reflect.Ptr { + // rv = rv.Elem() + // } + if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid + rv.Set(reflect.Zero(rv.Type())) + } return } - switch v := iv.(type) { - // case nil: - // case Selfer: - case reflect.Value: - v = d.ensureDecodeable(v) - d.decodeValue(v, nil, true) - - case *string: - *v = d.d.DecodeString() - case *bool: - *v = d.d.DecodeBool() - case *int: - *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) - case *int8: - *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) - case *int16: - *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) - case *int32: - *v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) - case *int64: - *v = d.d.DecodeInt64() - case *uint: - *v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) - case *uint8: - *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) - case *uint16: - *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) - case *uint32: - *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) - case *uint64: - *v = d.d.DecodeUint64() - case *float32: - f64 := d.d.DecodeFloat64() - if chkOvf.Float32(f64) { - d.errorf("float32 overflow: %v", f64) - } - *v = float32(f64) - case *float64: - *v = d.d.DecodeFloat64() - case *[]uint8: - *v = d.d.DecodeBytes(*v, false) - case []uint8: - b := d.d.DecodeBytes(v, false) - if !(len(b) > 0 && len(b) == len(v) && &b[0] == &v[0]) { - copy(v, b) + // If stream is not containing a nil value, then we can deref to the base + // non-pointer value, and decode into that. + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) } - case *time.Time: - *v = d.d.DecodeTime() - case *Raw: - *v = d.rawBytes() + rv = rv.Elem() + } - case *interface{}: - d.decodeValue(reflect.ValueOf(iv).Elem(), nil, true) - // d.decodeValueNotNil(reflect.ValueOf(iv).Elem()) + rt := rv.Type() + rtid := reflect.ValueOf(rt).Pointer() - default: - if v, ok := iv.(Selfer); ok { - v.CodecDecodeSelf(d) - } else if !fastpathDecodeTypeSwitch(iv, d) { - v := reflect.ValueOf(iv) - v = d.ensureDecodeable(v) - d.decodeValue(v, nil, false) - // d.decodeValueFallback(v) - } - } -} + // retrieve or register a focus'ed function for this type + // to eliminate need to do the retrieval multiple times -func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn, chkAll bool) { - // If stream is not containing a nil value, then we can deref to the base - // non-pointer value, and decode into that. 
- var rvp reflect.Value - var rvpValid bool - if rv.Kind() == reflect.Ptr { - rvpValid = true - for { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rvp = rv - rv = rv.Elem() - if rv.Kind() != reflect.Ptr { + // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) } + var fn decFn + var ok bool + if useMapForCodecCache { + fn, ok = d.f[rtid] + } else { + for i, v := range d.x { + if v == rtid { + fn, ok = d.s[i], true break } } } - - if fn == nil { - // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer - fn = d.h.fn(rv.Type(), chkAll, true) // chkAll, chkAll) - } - if fn.i.addrD { - if rvpValid { - fn.fd(d, &fn.i, rvp) - } else if rv.CanAddr() { - fn.fd(d, &fn.i, rv.Addr()) - } else if !fn.i.addrF { - fn.fd(d, &fn.i, rv) + if !ok { + // debugf("\tCreating new dec fn for type: %v\n", rt) + fi := decFnInfo{ti: getTypeInfo(rtid, rt), d: d, dd: d.d} + fn.i = &fi + // An extension can be registered for any type, regardless of the Kind + // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc. + // + // We can't check if it's an extension byte here first, because the user may have + // registered a pointer or non-pointer type, meaning we may have to recurse first + // before matching a mapped type, even though the extension byte is already detected. + // + // NOTE: if decoding into a nil interface{}, we return a non-nil + // value except even if the container registers a length of 0. + if rtid == rawExtTypId { + fn.f = (*decFnInfo).rawExt + } else if d.d.isBuiltinType(rtid) { + fn.f = (*decFnInfo).builtin + } else if xfTag, xfFn := d.h.getDecodeExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfTag, xfFn + fn.f = (*decFnInfo).ext + } else if supportBinaryMarshal && fi.ti.unm { + fn.f = (*decFnInfo).binaryMarshal } else { - d.errorf("cannot decode into a non-pointer value") + switch rk := rt.Kind(); rk { + case reflect.String: + fn.f = (*decFnInfo).kString + case reflect.Bool: + fn.f = (*decFnInfo).kBool + case reflect.Int: + fn.f = (*decFnInfo).kInt + case reflect.Int64: + fn.f = (*decFnInfo).kInt64 + case reflect.Int32: + fn.f = (*decFnInfo).kInt32 + case reflect.Int8: + fn.f = (*decFnInfo).kInt8 + case reflect.Int16: + fn.f = (*decFnInfo).kInt16 + case reflect.Float32: + fn.f = (*decFnInfo).kFloat32 + case reflect.Float64: + fn.f = (*decFnInfo).kFloat64 + case reflect.Uint8: + fn.f = (*decFnInfo).kUint8 + case reflect.Uint64: + fn.f = (*decFnInfo).kUint64 + case reflect.Uint: + fn.f = (*decFnInfo).kUint + case reflect.Uint32: + fn.f = (*decFnInfo).kUint32 + case reflect.Uint16: + fn.f = (*decFnInfo).kUint16 + // case reflect.Ptr: + // fn.f = (*decFnInfo).kPtr + case reflect.Interface: + fn.f = (*decFnInfo).kInterface + case reflect.Struct: + fn.f = (*decFnInfo).kStruct + case reflect.Slice: + fn.f = (*decFnInfo).kSlice + case reflect.Array: + fi.array = true + fn.f = (*decFnInfo).kArray + case reflect.Map: + fn.f = (*decFnInfo).kMap + default: + fn.f = (*decFnInfo).kErr + } } - } else { - fn.fd(d, &fn.i, rv) - } - // return rv -} - -func (d *Decoder) structFieldNotFound(index int, rvkencname string) { - // NOTE: rvkencname may be a stringView, so don't pass it to another function. 
- if d.h.ErrorIfNoField { - if index >= 0 { - d.errorf("no matching struct field found when decoding stream array at index %v", index) - return - } else if rvkencname != "" { - d.errorf("no matching struct field found when decoding stream map with key " + rvkencname) - return + if useMapForCodecCache { + if d.f == nil { + d.f = make(map[uintptr]decFn, 16) + } + d.f[rtid] = fn + } else { + d.s = append(d.s, fn) + d.x = append(d.x, rtid) } } - d.swallow() -} -func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) { - if d.h.ErrorIfNoArrayExpand { - d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen) - } -} + fn.f(fn.i, rv) -func isDecodeable(rv reflect.Value) (rv2 reflect.Value, canDecode bool) { - switch rv.Kind() { - case reflect.Array: - return rv, rv.CanAddr() - case reflect.Ptr: - if !rv.IsNil() { - return rv.Elem(), true - } - case reflect.Slice, reflect.Chan, reflect.Map: - if !rv.IsNil() { - return rv, true - } - } return } -func (d *Decoder) ensureDecodeable(rv reflect.Value) (rv2 reflect.Value) { - // decode can take any reflect.Value that is a inherently addressable i.e. - // - array - // - non-nil chan (we will SEND to it) - // - non-nil slice (we will set its elements) - // - non-nil map (we will put into it) - // - non-nil pointer (we can "update" it) - rv2, canDecode := isDecodeable(rv) - if canDecode { +func (d *Decoder) chkPtrValue(rv reflect.Value) { + // We can only decode into a non-nil pointer + if rv.Kind() == reflect.Ptr && !rv.IsNil() { return } if !rv.IsValid() { - d.errorstr(errstrCannotDecodeIntoNil) - return + decErr("Cannot decode into a zero (ie invalid) reflect.Value") } if !rv.CanInterface() { - d.errorf("cannot decode into a value without an interface: %v", rv) - return - } - rvi := rv2i(rv) - rvk := rv.Kind() - d.errorf("cannot decode into value of kind: %v, type: %T, %v", rvk, rvi, rvi) - return -} - -func (d *Decoder) depthIncr() { - d.depth++ - if d.depth >= d.maxdepth { - panic(errMaxDepthExceeded) + decErr("Cannot decode into a value without an interface: %v", rv) } + rvi := rv.Interface() + decErr("Cannot decode into non-pointer or nil pointer. Got: %v, %T, %v", + rv.Kind(), rvi, rvi) } -func (d *Decoder) depthDecr() { - d.depth-- -} - -// Possibly get an interned version of a string -// -// This should mostly be used for map keys, where the key type is string. -// This is because keys of a map/struct are typically reused across many objects. -func (d *Decoder) string(v []byte) (s string) { - if d.is == nil { - return string(v) // don't return stringView, as we need a real string here. - } - s, ok := d.is[string(v)] // no allocation here, per go implementation - if !ok { - s = string(v) // new allocation here - d.is[s] = s +func (d *Decoder) decEmbeddedField(rv reflect.Value, index []int) { + // d.decodeValue(rv.FieldByIndex(index)) + // nil pointers may be here; so reproduce FieldByIndex logic + enhancements + for _, j := range index { + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + // If a pointer, it must be a pointer to struct (based on typeInfo contract) + rv = rv.Elem() + } + rv = rv.Field(j) } - return s -} - -// nextValueBytes returns the next value in the stream as a set of bytes. -func (d *Decoder) nextValueBytes() (bs []byte) { - d.d.uncacheRead() - d.r.track() - d.swallow() - bs = d.r.stopTrack() - return -} - -func (d *Decoder) rawBytes() []byte { - // ensure that this is not a view into the bytes - // i.e. make new copy always. 
- bs := d.nextValueBytes() - bs2 := make([]byte, len(bs)) - copy(bs2, bs) - return bs2 -} - -func (d *Decoder) wrapErr(v interface{}, err *error) { - *err = decodeError{codecError: codecError{name: d.hh.Name(), err: v}, pos: int(d.r.numread())} -} - -// NumBytesRead returns the number of bytes read -func (d *Decoder) NumBytesRead() int { - return int(d.r.numread()) + d.decodeValue(rv) } // -------------------------------------------------- -// decSliceHelper assists when decoding into a slice, from a map or an array in the stream. -// A slice can be set from a map or array in stream. This supports the MapBySlice interface. -type decSliceHelper struct { - d *Decoder - // ct valueType - array bool -} - -func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) { - dd := d.d - ctyp := dd.ContainerType() - switch ctyp { - case valueTypeArray: - x.array = true - clen = dd.ReadArrayStart() - case valueTypeMap: - clen = dd.ReadMapStart() * 2 - default: - d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp) +// short circuit functions for common maps and slices + +func (d *Decoder) decSliceIntf(v *[]interface{}, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]interface{}, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]interface{}, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + d.decode(&s[j]) + } + *v = s +} + +func (d *Decoder) decSliceInt64(v *[]int64, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]int64, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]int64, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeInt(intBitsize) + } + *v = s +} + +func (d *Decoder) decSliceUint64(v *[]uint64, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]uint64, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]uint64, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeUint(intBitsize) + } + *v = s +} + +func (d *Decoder) decSliceStr(v *[]string, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]string, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]string, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeString() + } + *v = s +} + +func (d *Decoder) decMapIntfIntf(v *map[interface{}]interface{}) { + containerLen := 
d.d.readMapLen() + m := *v + if m == nil { + m = make(map[interface{}]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + var mk interface{} + d.decode(&mk) + // special case if a byte array. + if bv, bok := mk.([]byte); bok { + mk = string(bv) + } + mv := m[mk] + d.decode(&mv) + m[mk] = mv } - // x.ct = ctyp - x.d = d - return } -func (x decSliceHelper) End() { - if x.array { - x.d.d.ReadArrayEnd() - } else { - x.d.d.ReadMapEnd() +func (d *Decoder) decMapInt64Intf(v *map[int64]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[int64]interface{}, containerLen) + *v = m } -} - -func (x decSliceHelper) ElemContainerState(index int) { - if x.array { - x.d.d.ReadArrayElem() - } else if index%2 == 0 { - x.d.d.ReadMapElemKey() - } else { - x.d.d.ReadMapElemValue() + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeInt(intBitsize) + mv := m[mk] + d.decode(&mv) + m[mk] = mv } } -func decByteSlice(r *decReaderSwitch, clen, maxInitLen int, bs []byte) (bsOut []byte) { - if clen == 0 { - return zeroByteSlice - } - if len(bs) == clen { - bsOut = bs - r.readb(bsOut) - } else if cap(bs) >= clen { - bsOut = bs[:clen] - r.readb(bsOut) - } else { - // bsOut = make([]byte, clen) - len2 := decInferLen(clen, maxInitLen, 1) - bsOut = make([]byte, len2) - r.readb(bsOut) - for len2 < clen { - len3 := decInferLen(clen-len2, maxInitLen, 1) - bs3 := bsOut - bsOut = make([]byte, len2+len3) - copy(bsOut, bs3) - r.readb(bsOut[len2:]) - len2 += len3 - } +func (d *Decoder) decMapUint64Intf(v *map[uint64]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[uint64]interface{}, containerLen) + *v = m } - return -} - -// func decByteSliceZeroCopy(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) { -// if _, ok := r.(*bytesDecReader); ok && clen <= maxInitLen { -// return r.readx(clen) -// } -// return decByteSlice(r, clen, maxInitLen, bs) -// } - -func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) { - if xlen := len(in); xlen > 0 { - if isBytesReader || xlen <= scratchByteArrayLen { - if cap(dest) >= xlen { - out = dest[:xlen] - } else { - out = make([]byte, xlen) - } - copy(out, in) - return - } + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeUint(intBitsize) + mv := m[mk] + d.decode(&mv) + m[mk] = mv } - return in } -// decInferLen will infer a sensible length, given the following: -// - clen: length wanted. -// - maxlen: max length to be returned. -// if <= 0, it is unset, and we infer it based on the unit size -// - unit: number of bytes for each element of the collection -func decInferLen(clen, maxlen, unit int) (rvlen int) { - // handle when maxlen is not set i.e. <= 0 - if clen <= 0 { - return - } - if unit == 0 { - return clen - } - if maxlen <= 0 { - // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items. 
- // maxlen = 256 * 1024 / unit - // if maxlen < (4 * 1024) { - // maxlen = 4 * 1024 - // } - if unit < (256 / 4) { - maxlen = 256 * 1024 / unit - } else { - maxlen = 4 * 1024 - } +func (d *Decoder) decMapStrIntf(v *map[string]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[string]interface{}, containerLen) + *v = m } - if clen > maxlen { - rvlen = maxlen - } else { - rvlen = clen + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeString() + mv := m[mk] + d.decode(&mv) + m[mk] = mv } - return } -func expandSliceRV(s reflect.Value, st reflect.Type, canChange bool, stElemSize, num, slen, scap int) ( - s2 reflect.Value, scap2 int, changed bool, err string) { - l1 := slen + num // new slice length - if l1 < slen { - err = errmsgExpandSliceOverflow - return - } - if l1 <= scap { - if s.CanSet() { - s.SetLen(l1) - } else if canChange { - s2 = s.Slice(0, l1) - scap2 = scap - changed = true - } else { - err = errmsgExpandSliceCannotChange - return - } - return - } - if !canChange { - err = errmsgExpandSliceCannotChange - return - } - scap2 = growCap(scap, stElemSize, num) - s2 = reflect.MakeSlice(st, l1, scap2) - changed = true - reflect.Copy(s2, s) - return -} +// ---------------------------------------- -func decReadFull(r io.Reader, bs []byte) (n uint, err error) { - var nn int - for n < uint(len(bs)) && err == nil { - nn, err = r.Read(bs[n:]) - if nn > 0 { - if err == io.EOF { - // leave EOF for next time - err = nil - } - n += uint(nn) - } +func decContLens(dd decDriver, currEncodedType valueType) (containerLen, containerLenS int) { + if currEncodedType == valueTypeInvalid { + currEncodedType = dd.currentEncodedType() + } + switch currEncodedType { + case valueTypeArray: + containerLen = dd.readArrayLen() + containerLenS = containerLen + case valueTypeMap: + containerLen = dd.readMapLen() + containerLenS = containerLen * 2 + default: + decErr("Only encoded map or array can be decoded into a slice. (valueType: %0x)", + currEncodedType) } - // xdebugf("decReadFull: len(bs): %v, n: %v, err: %v", len(bs), n, err) - // do not do this - it serves no purpose - // if n != len(bs) && err == io.EOF { err = io.ErrUnexpectedEOF } return } -func decNakedReadRawBytes(dr decDriver, d *Decoder, n *decNaked, rawToString bool) { - if rawToString { - n.v = valueTypeString - n.s = string(dr.DecodeBytes(d.b[:], true)) - } else { - n.v = valueTypeBytes - n.l = dr.DecodeBytes(nil, false) - } +func decErr(format string, params ...interface{}) { + doPanic(msgTagDec, format, params...) } diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/doc.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/doc.go deleted file mode 100644 index 5c5df9cf..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/doc.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -/* -Package codec provides a -High Performance, Feature-Rich Idiomatic Go 1.4+ codec/encoding library -for binc, msgpack, cbor, json. - -Supported Serialization formats are: - - - msgpack: https://github.com/msgpack/msgpack - - binc: http://github.com/ugorji/binc - - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 - - json: http://json.org http://tools.ietf.org/html/rfc7159 - - simple: - -This package will carefully use 'package unsafe' for performance reasons in specific places. 
-You can build without unsafe use by passing the safe or appengine tag -i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 4 -go releases e.g. current go release is go 1.12, so we support unsafe use only from -go 1.9+ . This is because supporting unsafe requires knowledge of implementation details. - -For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer . - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Support for go1.4 and above, while selectively using newer APIs for later releases - - Excellent code coverage ( > 90% ) - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. - - Careful selected use of 'unsafe' for targeted performance gains. - 100% mode exists where 'unsafe' is not used at all. - - Lock-free (sans mutex) concurrency for scaling to 100's of cores - - In-place updates during decode, with option to zero value in maps and slices prior to decode - - Coerce types where appropriate - e.g. decode an int in the stream into a float, decode numbers from formatted strings, etc - - Corner Cases: - Overflows, nil maps/slices, nil values in streams are handled correctly - - Standard field renaming via tags - - Support for omitting empty fields during an encoding - - Encoding from any value and decoding into pointer to any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Extensions to support efficient encoding/decoding of any named types - - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces - - Support IsZero() bool to determine if a value is a zero value. - Analogous to time.Time.IsZero() bool. - - Decoding without a schema (into a interface{}). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Mapping a non-interface type to an interface, so we can decode appropriately - into any interface type with a correctly configured non-interface value. - - Encode a struct as an array, and decode struct from an array in the data stream - - Option to encode struct keys as numbers (instead of strings) - (to support structured streams with fields encoded as numeric codes) - - Comprehensive support for anonymous fields - - Fast (no-reflection) encoding/decoding of common maps and slices - - Code-generation for faster performance. - - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats - - Support indefinite-length formats to enable true streaming - (for formats which support it e.g. json, cbor) - - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. - This mostly applies to maps, where iteration order is non-deterministic. - - NIL in data stream decoded as zero value - - Never silently skip data when decoding. - User decides whether to return an error or silently skip data when keys or indexes - in the data stream do not map to fields in the struct. - - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown) - - Encode/Decode from/to chan types (for iterative streaming support) - - Drop-in replacement for encoding/json. `json:` key in struct tag supported. - - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Handle unique idiosyncrasies of codecs e.g. 
- - For messagepack, configure how ambiguities in handling raw bytes are resolved - - For messagepack, provide rpc server/client codec to support - msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - -Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... } - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -Custom Encoding and Decoding - -This package maintains symmetry in the encoding and decoding halfs. -We determine how to encode or decode by walking this decision tree - - - is type a codec.Selfer? - - is there an extension registered for the type? - - is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler? - - is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler? - - is format text-based, and type an encoding.TextMarshaler and TextUnmarshaler? - - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc - -This symmetry is important to reduce chances of issues happening because the -encoding and decoding sides are out of sync e.g. decoded via very specific -encoding.TextUnmarshaler but encoded via kind-specific generalized mode. - -Consequently, if a type only defines one-half of the symmetry -(e.g. it implements UnmarshalJSON() but not MarshalJSON() ), -then that type doesn't satisfy the check and we will continue walking down the -decision tree. - -RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -Usage - -The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification. - -The Encoder and Decoder are NOT safe for concurrent use. - -Consequently, the usage model is basically: - - - Create and initialize the Handle before any use. - Once created, DO NOT modify it. - - Multiple Encoders or Decoders can now use the Handle concurrently. - They only read information off the Handle (never write). - - However, each Encoder or Decoder MUST not be used concurrently - - To re-use an Encoder/Decoder, call Reset(...) on it first. - This allows you use state maintained on the Encoder/Decoder. - -Sample usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ch codec.CborHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. 
for msgpack, define functions and enable Time support for tag 1 - // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - -Running Tests - -To run tests, use the following: - - go test - -To run the full suite of tests, use the following: - - go test -tags alltests -run Suite - -You can run the tag 'safe' to run tests or build in safe mode. e.g. - - go test -tags safe -run Json - go test -tags "alltests safe" -run Suite - -Running Benchmarks - - cd bench - go test -bench . -benchmem -benchtime 1s - -Please see http://github.com/ugorji/go-codec-bench . - -Managing Binary Size - -This package could add up to 10MB to the size of your binaries. - -This is because we include some a auto-generated file: `fast-path.generated.go` -to help with performance when encoding/decoding slices and maps of -built in numeric, boolean, string and interface{} types. - -You can override this by building (or running tests and benchmarks) -with the tag: `notfastpath`. - - go install -tags notfastpath - go build -tags notfastpath - go test -tags notfastpath - -Be aware that, at least in our representative microbenchmarks for cbor (for example), -we see up to 33% increase in decoding and 50% increase in encoding speeds. -YMMV. - -Caveats - -Struct fields matching the following are ignored during encoding and decoding - - struct tag value set to - - - func, complex numbers, unsafe pointers - - unexported and not embedded - - unexported and embedded and not struct kind - - unexported and embedded pointers (from go1.10) - -Every other field in a struct will be encoded/decoded. - -Embedded fields are encoded as if they exist in the top-level struct, -with some caveats. See Encode documentation. - -*/ -package codec - diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/encode.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/encode.go index 9d8d1164..4914be0c 100644 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/encode.go +++ b/src/vendor/github.com/hashicorp/go-msgpack/codec/encode.go @@ -1,1267 +1,626 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. package codec import ( - "encoding" - "errors" - "fmt" "io" "reflect" - "runtime" - "sort" - "strconv" - "time" ) -// defEncByteBufSize is the default size of []byte used -// for bufio buffer or []byte (when nil passed) -const defEncByteBufSize = 1 << 10 // 4:16, 6:64, 8:256, 10:1024 +const ( + // Some tagging information for error messages. 
+ msgTagEnc = "codec.encoder" + defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 + // maxTimeSecs32 = math.MaxInt32 / 60 / 24 / 366 +) -var errEncoderNotInitialized = errors.New("Encoder not initialized") +// AsSymbolFlag defines what should be encoded as symbols. +type AsSymbolFlag uint8 -/* +const ( + // AsSymbolDefault is default. + // Currently, this means only encode struct field names as symbols. + // The default is subject to change. + AsSymbolDefault AsSymbolFlag = iota -// encWriter abstracts writing to a byte array or to an io.Writer. -// -// -// Deprecated: Use encWriterSwitch instead. + // AsSymbolAll means encode anything which could be a symbol as a symbol. + AsSymbolAll = 0xfe + + // AsSymbolNone means do not encode anything as a symbol. + AsSymbolNone = 1 << iota + + // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. + AsSymbolMapStringKeysFlag + + // AsSymbolStructFieldName means encode struct field names as symbols. + AsSymbolStructFieldNameFlag +) + +// encWriter abstracting writing to a byte array or to an io.Writer. type encWriter interface { + writeUint16(uint16) + writeUint32(uint32) + writeUint64(uint64) writeb([]byte) writestr(string) writen1(byte) writen2(byte, byte) - end() + atEndOfEncode() } -*/ - // encDriver abstracts the actual codec (binc vs msgpack, etc) type encDriver interface { - EncodeNil() - EncodeInt(i int64) - EncodeUint(i uint64) - EncodeBool(b bool) - EncodeFloat32(f float32) - EncodeFloat64(f float64) - // encodeExtPreamble(xtag byte, length int) - EncodeRawExt(re *RawExt, e *Encoder) - EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder) - // Deprecated: use EncodeStringEnc instead - EncodeString(c charEncoding, v string) - // Deprecated: use EncodeStringBytesRaw instead - EncodeStringBytes(c charEncoding, v []byte) - EncodeStringEnc(c charEncoding, v string) // c cannot be cRAW - // EncodeSymbol(v string) - EncodeStringBytesRaw(v []byte) - EncodeTime(time.Time) + isBuiltinType(rt uintptr) bool + encodeBuiltin(rt uintptr, v interface{}) + encodeNil() + encodeInt(i int64) + encodeUint(i uint64) + encodeBool(b bool) + encodeFloat32(f float32) + encodeFloat64(f float64) + encodeExtPreamble(xtag byte, length int) + encodeArrayPreamble(length int) + encodeMapPreamble(length int) + encodeString(c charEncoding, v string) + encodeSymbol(v string) + encodeStringBytes(c charEncoding, v []byte) + //TODO //encBignum(f *big.Int) //encStringRunes(c charEncoding, v []rune) - WriteArrayStart(length int) - WriteArrayElem() - WriteArrayEnd() - WriteMapStart(length int) - WriteMapElemKey() - WriteMapElemValue() - WriteMapEnd() - - reset() - atEndOfEncode() -} - -type encDriverAsis interface { - EncodeAsis(v []byte) -} - -type encodeError struct { - codecError } -func (e encodeError) Error() string { - return fmt.Sprintf("%s encode error: %v", e.name, e.err) +type ioEncWriterWriter interface { + WriteByte(c byte) error + WriteString(s string) (n int, err error) + Write(p []byte) (n int, err error) } -type encDriverNoopContainerWriter struct{} - -func (encDriverNoopContainerWriter) WriteArrayStart(length int) {} -func (encDriverNoopContainerWriter) WriteArrayElem() {} -func (encDriverNoopContainerWriter) WriteArrayEnd() {} -func (encDriverNoopContainerWriter) WriteMapStart(length int) {} -func (encDriverNoopContainerWriter) WriteMapElemKey() {} -func (encDriverNoopContainerWriter) WriteMapElemValue() {} -func (encDriverNoopContainerWriter) WriteMapEnd() {} -func (encDriverNoopContainerWriter) atEndOfEncode() {} - -type 
encDriverTrackContainerWriter struct { - c containerState +type ioEncStringWriter interface { + WriteString(s string) (n int, err error) } -func (e *encDriverTrackContainerWriter) WriteArrayStart(length int) { e.c = containerArrayStart } -func (e *encDriverTrackContainerWriter) WriteArrayElem() { e.c = containerArrayElem } -func (e *encDriverTrackContainerWriter) WriteArrayEnd() { e.c = containerArrayEnd } -func (e *encDriverTrackContainerWriter) WriteMapStart(length int) { e.c = containerMapStart } -func (e *encDriverTrackContainerWriter) WriteMapElemKey() { e.c = containerMapKey } -func (e *encDriverTrackContainerWriter) WriteMapElemValue() { e.c = containerMapValue } -func (e *encDriverTrackContainerWriter) WriteMapEnd() { e.c = containerMapEnd } -func (e *encDriverTrackContainerWriter) atEndOfEncode() {} - -// type ioEncWriterWriter interface { -// WriteByte(c byte) error -// WriteString(s string) (n int, err error) -// Write(p []byte) (n int, err error) -// } - -// EncodeOptions captures configuration options during encode. type EncodeOptions struct { - // WriterBufferSize is the size of the buffer used when writing. - // - // if > 0, we use a smart buffer internally for performance purposes. - WriterBufferSize int - - // ChanRecvTimeout is the timeout used when selecting from a chan. - // - // Configuring this controls how we receive from a chan during the encoding process. - // - If ==0, we only consume the elements currently available in the chan. - // - if <0, we consume until the chan is closed. - // - If >0, we consume until this timeout. - ChanRecvTimeout time.Duration - - // StructToArray specifies to encode a struct as an array, and not as a map + // Encode a struct as an array, and not as a map. StructToArray bool - // Canonical representation means that encoding a value will always result in the same - // sequence of bytes. - // - // This only affects maps, as the iteration order for maps is random. - // - // The implementation MAY use the natural sort order for the map keys if possible: - // - // - If there is a natural sort order (ie for number, bool, string or []byte keys), - // then the map keys are first sorted in natural order and then written - // with corresponding map values to the strema. - // - If there is no natural sort order, then the map keys will first be - // encoded into []byte, and then sorted, - // before writing the sorted keys and the corresponding map values to the stream. - // - Canonical bool - - // CheckCircularRef controls whether we check for circular references - // and error fast during an encode. + // AsSymbols defines what should be encoded as symbols. // - // If enabled, an error is received if a pointer to a struct - // references itself either directly or through one of its fields (iteratively). + // Encoding as symbols can reduce the encoded size significantly. // - // This is opt-in, as there may be a performance hit to checking circular references. - CheckCircularRef bool - - // RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers - // when checking if a value is empty. + // However, during decoding, each string to be encoded as a symbol must + // be checked to see if it has been seen before. Consequently, encoding time + // will increase if using symbols, because string comparisons has a clear cost. // - // Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls. 
- RecursiveEmptyCheck bool + // Sample values: + // AsSymbolNone + // AsSymbolAll + // AsSymbolMapStringKeys + // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag + AsSymbols AsSymbolFlag +} - // Raw controls whether we encode Raw values. - // This is a "dangerous" option and must be explicitly set. - // If set, we blindly encode Raw values as-is, without checking - // if they are a correct representation of a value in that format. - // If unset, we error out. - Raw bool +// --------------------------------------------- - // StringToRaw controls how strings are encoded. - // - // As a go string is just an (immutable) sequence of bytes, - // it can be encoded either as raw bytes or as a UTF string. - // - // By default, strings are encoded as UTF-8. - // but can be treated as []byte during an encode. - // - // Note that things which we know (by definition) to be UTF-8 - // are ALWAYS encoded as UTF-8 strings. - // These include encoding.TextMarshaler, time.Format calls, struct field names, etc. - StringToRaw bool - - // // AsSymbols defines what should be encoded as symbols. - // // - // // Encoding as symbols can reduce the encoded size significantly. - // // - // // However, during decoding, each string to be encoded as a symbol must - // // be checked to see if it has been seen before. Consequently, encoding time - // // will increase if using symbols, because string comparisons has a clear cost. - // // - // // Sample values: - // // AsSymbolNone - // // AsSymbolAll - // // AsSymbolMapStringKeys - // // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag - // AsSymbols AsSymbolFlag +type simpleIoEncWriterWriter struct { + w io.Writer + bw io.ByteWriter + sw ioEncStringWriter } -// --------------------------------------------- +func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) { + if o.bw != nil { + return o.bw.WriteByte(c) + } + _, err = o.w.Write([]byte{c}) + return +} -/* +func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) { + if o.sw != nil { + return o.sw.WriteString(s) + } + return o.w.Write([]byte(s)) +} -type ioEncStringWriter interface { - WriteString(s string) (n int, err error) +func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) { + return o.w.Write(p) } +// ---------------------------------------- + // ioEncWriter implements encWriter and can write to an io.Writer implementation type ioEncWriter struct { - w io.Writer - ww io.Writer - bw io.ByteWriter - sw ioEncStringWriter - fw ioFlusher - b [8]byte + w ioEncWriterWriter + x [8]byte // temp byte array re-used internally for efficiency } -func (z *ioEncWriter) reset(w io.Writer) { - z.w = w - var ok bool - if z.bw, ok = w.(io.ByteWriter); !ok { - z.bw = z - } - if z.sw, ok = w.(ioEncStringWriter); !ok { - z.sw = z - } - z.fw, _ = w.(ioFlusher) - z.ww = w +func (z *ioEncWriter) writeUint16(v uint16) { + bigen.PutUint16(z.x[:2], v) + z.writeb(z.x[:2]) } -func (z *ioEncWriter) WriteByte(b byte) (err error) { - z.b[0] = b - _, err = z.w.Write(z.b[:1]) - return +func (z *ioEncWriter) writeUint32(v uint32) { + bigen.PutUint32(z.x[:4], v) + z.writeb(z.x[:4]) } -func (z *ioEncWriter) WriteString(s string) (n int, err error) { - return z.w.Write(bytesView(s)) +func (z *ioEncWriter) writeUint64(v uint64) { + bigen.PutUint64(z.x[:8], v) + z.writeb(z.x[:8]) } func (z *ioEncWriter) writeb(bs []byte) { - if _, err := z.ww.Write(bs); err != nil { + if len(bs) == 0 { + return + } + n, err := z.w.Write(bs) + if err != nil { panic(err) } + if n != len(bs) { + encErr("write: 
Incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n) + } } func (z *ioEncWriter) writestr(s string) { - if _, err := z.sw.WriteString(s); err != nil { + n, err := z.w.WriteString(s) + if err != nil { panic(err) } + if n != len(s) { + encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n) + } } func (z *ioEncWriter) writen1(b byte) { - if err := z.bw.WriteByte(b); err != nil { + if err := z.w.WriteByte(b); err != nil { panic(err) } } -func (z *ioEncWriter) writen2(b1, b2 byte) { - var err error - if err = z.bw.WriteByte(b1); err == nil { - if err = z.bw.WriteByte(b2); err == nil { - return - } - } - panic(err) -} - -// func (z *ioEncWriter) writen5(b1, b2, b3, b4, b5 byte) { -// z.b[0], z.b[1], z.b[2], z.b[3], z.b[4] = b1, b2, b3, b4, b5 -// if _, err := z.ww.Write(z.b[:5]); err != nil { -// panic(err) -// } -// } - -//go:noinline - so *encWriterSwitch.XXX has the bytesEncAppender.XXX inlined -func (z *ioEncWriter) end() { - if z.fw != nil { - if err := z.fw.Flush(); err != nil { - panic(err) - } - } +func (z *ioEncWriter) writen2(b1 byte, b2 byte) { + z.writen1(b1) + z.writen1(b2) } -*/ +func (z *ioEncWriter) atEndOfEncode() {} -// --------------------------------------------- - -// bufioEncWriter -type bufioEncWriter struct { - buf []byte - w io.Writer - n int - sz int // buf size - - // Extensions can call Encode() within a current Encode() call. - // We need to know when the top level Encode() call returns, - // so we can decide whether to Release() or not. - calls uint16 // what depth in mustDecode are we in now. - - _ [6]uint8 // padding +// ---------------------------------------- - bytesBufPooler - - _ [1]uint64 // padding - // a int - // b [4]byte - // err +// bytesEncWriter implements encWriter and can write to an byte slice. +// It is used by Marshal function. 
+type bytesEncWriter struct { + b []byte + c int // cursor + out *[]byte // write out on atEndOfEncode } -func (z *bufioEncWriter) reset(w io.Writer, bufsize int) { - z.w = w - z.n = 0 - z.calls = 0 - if bufsize <= 0 { - bufsize = defEncByteBufSize - } - z.sz = bufsize - if cap(z.buf) >= bufsize { - z.buf = z.buf[:cap(z.buf)] - } else { - z.buf = z.bytesBufPooler.get(bufsize) - // z.buf = make([]byte, bufsize) - } +func (z *bytesEncWriter) writeUint16(v uint16) { + c := z.grow(2) + z.b[c] = byte(v >> 8) + z.b[c+1] = byte(v) } -func (z *bufioEncWriter) release() { - z.buf = nil - z.bytesBufPooler.end() +func (z *bytesEncWriter) writeUint32(v uint32) { + c := z.grow(4) + z.b[c] = byte(v >> 24) + z.b[c+1] = byte(v >> 16) + z.b[c+2] = byte(v >> 8) + z.b[c+3] = byte(v) } -//go:noinline - flush only called intermittently -func (z *bufioEncWriter) flushErr() (err error) { - n, err := z.w.Write(z.buf[:z.n]) - z.n -= n - if z.n > 0 && err == nil { - err = io.ErrShortWrite - } - if n > 0 && z.n > 0 { - copy(z.buf, z.buf[n:z.n+n]) - } - return err +func (z *bytesEncWriter) writeUint64(v uint64) { + c := z.grow(8) + z.b[c] = byte(v >> 56) + z.b[c+1] = byte(v >> 48) + z.b[c+2] = byte(v >> 40) + z.b[c+3] = byte(v >> 32) + z.b[c+4] = byte(v >> 24) + z.b[c+5] = byte(v >> 16) + z.b[c+6] = byte(v >> 8) + z.b[c+7] = byte(v) } -func (z *bufioEncWriter) flush() { - if err := z.flushErr(); err != nil { - panic(err) +func (z *bytesEncWriter) writeb(s []byte) { + if len(s) == 0 { + return } + c := z.grow(len(s)) + copy(z.b[c:], s) } -func (z *bufioEncWriter) writeb(s []byte) { -LOOP: - a := len(z.buf) - z.n - if len(s) > a { - z.n += copy(z.buf[z.n:], s[:a]) - s = s[a:] - z.flush() - goto LOOP - } - z.n += copy(z.buf[z.n:], s) +func (z *bytesEncWriter) writestr(s string) { + c := z.grow(len(s)) + copy(z.b[c:], s) } -func (z *bufioEncWriter) writestr(s string) { - // z.writeb(bytesView(s)) // inlined below -LOOP: - a := len(z.buf) - z.n - if len(s) > a { - z.n += copy(z.buf[z.n:], s[:a]) - s = s[a:] - z.flush() - goto LOOP - } - z.n += copy(z.buf[z.n:], s) +func (z *bytesEncWriter) writen1(b1 byte) { + c := z.grow(1) + z.b[c] = b1 } -func (z *bufioEncWriter) writen1(b1 byte) { - if 1 > len(z.buf)-z.n { - z.flush() - } - z.buf[z.n] = b1 - z.n++ +func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { + c := z.grow(2) + z.b[c] = b1 + z.b[c+1] = b2 } -func (z *bufioEncWriter) writen2(b1, b2 byte) { - if 2 > len(z.buf)-z.n { - z.flush() - } - z.buf[z.n+1] = b2 - z.buf[z.n] = b1 - z.n += 2 +func (z *bytesEncWriter) atEndOfEncode() { + *(z.out) = z.b[:z.c] } -func (z *bufioEncWriter) endErr() (err error) { - if z.n > 0 { - err = z.flushErr() +func (z *bytesEncWriter) grow(n int) (oldcursor int) { + oldcursor = z.c + z.c = oldcursor + n + if z.c > cap(z.b) { + // Tried using appendslice logic: (if cap < 1024, *2, else *1.25). + // However, it was too expensive, causing too many iterations of copy. + // Using bytes.Buffer model was much better (2*cap + n) + bs := make([]byte, 2*cap(z.b)+n) + copy(bs, z.b[:oldcursor]) + z.b = bs + } else if z.c > len(z.b) { + z.b = z.b[:cap(z.b)] } return } // --------------------------------------------- -// bytesEncAppender implements encWriter and can write to an byte slice. -type bytesEncAppender struct { - b []byte - out *[]byte +type encFnInfo struct { + ti *typeInfo + e *Encoder + ee encDriver + xfFn func(reflect.Value) ([]byte, error) + xfTag byte } -func (z *bytesEncAppender) writeb(s []byte) { - z.b = append(z.b, s...) 
-} -func (z *bytesEncAppender) writestr(s string) { - z.b = append(z.b, s...) -} -func (z *bytesEncAppender) writen1(b1 byte) { - z.b = append(z.b, b1) -} -func (z *bytesEncAppender) writen2(b1, b2 byte) { - z.b = append(z.b, b1, b2) +func (f *encFnInfo) builtin(rv reflect.Value) { + f.ee.encodeBuiltin(f.ti.rtid, rv.Interface()) } -func (z *bytesEncAppender) endErr() error { - *(z.out) = z.b - return nil -} -func (z *bytesEncAppender) reset(in []byte, out *[]byte) { - z.b = in[:0] - z.out = out + +func (f *encFnInfo) rawExt(rv reflect.Value) { + f.e.encRawExt(rv.Interface().(RawExt)) } -// --------------------------------------------- +func (f *encFnInfo) ext(rv reflect.Value) { + bs, fnerr := f.xfFn(rv) + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + f.ee.encodeNil() + return + } + if f.e.hh.writeExt() { + f.ee.encodeExtPreamble(f.xfTag, len(bs)) + f.e.w.writeb(bs) + } else { + f.ee.encodeStringBytes(c_RAW, bs) + } -func (e *Encoder) rawExt(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeRawExt(rv2i(rv).(*RawExt), e) } -func (e *Encoder) ext(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeExt(rv2i(rv), f.xfTag, f.xfFn, e) +func (f *encFnInfo) binaryMarshal(rv reflect.Value) { + var bm binaryMarshaler + if f.ti.mIndir == 0 { + bm = rv.Interface().(binaryMarshaler) + } else if f.ti.mIndir == -1 { + bm = rv.Addr().Interface().(binaryMarshaler) + } else { + for j, k := int8(0), f.ti.mIndir; j < k; j++ { + if rv.IsNil() { + f.ee.encodeNil() + return + } + rv = rv.Elem() + } + bm = rv.Interface().(binaryMarshaler) + } + // debugf(">>>> binaryMarshaler: %T", rv.Interface()) + bs, fnerr := bm.MarshalBinary() + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + f.ee.encodeNil() + } else { + f.ee.encodeStringBytes(c_RAW, bs) + } } -func (e *Encoder) selferMarshal(f *codecFnInfo, rv reflect.Value) { - rv2i(rv).(Selfer).CodecEncodeSelf(e) +func (f *encFnInfo) kBool(rv reflect.Value) { + f.ee.encodeBool(rv.Bool()) } -func (e *Encoder) binaryMarshal(f *codecFnInfo, rv reflect.Value) { - bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() - e.marshalRaw(bs, fnerr) +func (f *encFnInfo) kString(rv reflect.Value) { + f.ee.encodeString(c_UTF8, rv.String()) } -func (e *Encoder) textMarshal(f *codecFnInfo, rv reflect.Value) { - bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() - e.marshalUtf8(bs, fnerr) +func (f *encFnInfo) kFloat64(rv reflect.Value) { + f.ee.encodeFloat64(rv.Float()) } -func (e *Encoder) jsonMarshal(f *codecFnInfo, rv reflect.Value) { - bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() - e.marshalAsis(bs, fnerr) +func (f *encFnInfo) kFloat32(rv reflect.Value) { + f.ee.encodeFloat32(float32(rv.Float())) } -func (e *Encoder) raw(f *codecFnInfo, rv reflect.Value) { - e.rawBytes(rv2i(rv).(Raw)) +func (f *encFnInfo) kInt(rv reflect.Value) { + f.ee.encodeInt(rv.Int()) } -func (e *Encoder) kInvalid(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeNil() +func (f *encFnInfo) kUint(rv reflect.Value) { + f.ee.encodeUint(rv.Uint()) } -func (e *Encoder) kErr(f *codecFnInfo, rv reflect.Value) { - e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv) +func (f *encFnInfo) kInvalid(rv reflect.Value) { + f.ee.encodeNil() } -func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) { - ti := f.ti - ee := e.e - // array may be non-addressable, so we have to manage with care - // (don't call rv.Bytes, rv.Slice, etc). - // E.g. type struct S{B [2]byte}; - // Encode(S{}) will bomb on "panic: slice of unaddressable array". 
- if f.seq != seqTypeArray { - if rv.IsNil() { - ee.EncodeNil() +func (f *encFnInfo) kErr(rv reflect.Value) { + encErr("Unsupported kind: %s, for: %#v", rv.Kind(), rv) +} + +func (f *encFnInfo) kSlice(rv reflect.Value) { + if rv.IsNil() { + f.ee.encodeNil() + return + } + + if shortCircuitReflectToFastPath { + switch f.ti.rtid { + case intfSliceTypId: + f.e.encSliceIntf(rv.Interface().([]interface{})) return - } - // If in this method, then there was no extension function defined. - // So it's okay to treat as []byte. - if ti.rtid == uint8SliceTypId { - ee.EncodeStringBytesRaw(rv.Bytes()) + case strSliceTypId: + f.e.encSliceStr(rv.Interface().([]string)) + return + case uint64SliceTypId: + f.e.encSliceUint64(rv.Interface().([]uint64)) + return + case int64SliceTypId: + f.e.encSliceInt64(rv.Interface().([]int64)) return } } - if f.seq == seqTypeChan && ti.chandir&uint8(reflect.RecvDir) == 0 { - e.errorf("send-only channel cannot be encoded") - } - elemsep := e.esep - rtelem := ti.elem - rtelemIsByte := uint8TypId == rt2id(rtelem) // NOT rtelem.Kind() == reflect.Uint8 - var l int - // if a slice, array or chan of bytes, treat specially - if rtelemIsByte { - switch f.seq { - case seqTypeSlice: - ee.EncodeStringBytesRaw(rv.Bytes()) - case seqTypeArray: - l = rv.Len() - if rv.CanAddr() { - ee.EncodeStringBytesRaw(rv.Slice(0, l).Bytes()) - } else { - var bs []byte - if l <= cap(e.b) { - bs = e.b[:l] - } else { - bs = make([]byte, l) - } - reflect.Copy(reflect.ValueOf(bs), rv) - ee.EncodeStringBytesRaw(bs) - } - case seqTypeChan: - // do not use range, so that the number of elements encoded - // does not change, and encoding does not hang waiting on someone to close chan. - // for b := range rv2i(rv).(<-chan byte) { bs = append(bs, b) } - // ch := rv2i(rv).(<-chan byte) // fix error - that this is a chan byte, not a <-chan byte. - - if rv.IsNil() { - ee.EncodeNil() - break - } - bs := e.b[:0] - irv := rv2i(rv) - ch, ok := irv.(<-chan byte) - if !ok { - ch = irv.(chan byte) - } - - L1: - switch timeout := e.h.ChanRecvTimeout; { - case timeout == 0: // only consume available - for { - select { - case b := <-ch: - bs = append(bs, b) - default: - break L1 - } - } - case timeout > 0: // consume until timeout - tt := time.NewTimer(timeout) - for { - select { - case b := <-ch: - bs = append(bs, b) - case <-tt.C: - // close(tt.C) - break L1 - } - } - default: // consume until close - for b := range ch { - bs = append(bs, b) - } - } - ee.EncodeStringBytesRaw(bs) - } + // If in this method, then there was no extension function defined. + // So it's okay to treat as []byte. + if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { + f.ee.encodeStringBytes(c_RAW, rv.Bytes()) return } - // if chan, consume chan into a slice, and work off that slice. 
- if f.seq == seqTypeChan { - rvcs := reflect.Zero(reflect.SliceOf(rtelem)) - timeout := e.h.ChanRecvTimeout - if timeout < 0 { // consume until close - for { - recv, recvOk := rv.Recv() - if !recvOk { - break - } - rvcs = reflect.Append(rvcs, recv) - } - } else { - cases := make([]reflect.SelectCase, 2) - cases[0] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: rv} - if timeout == 0 { - cases[1] = reflect.SelectCase{Dir: reflect.SelectDefault} - } else { - tt := time.NewTimer(timeout) - cases[1] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(tt.C)} - } - for { - chosen, recv, recvOk := reflect.Select(cases) - if chosen == 1 || !recvOk { - break - } - rvcs = reflect.Append(rvcs, recv) - } - } - rv = rvcs // TODO: ensure this doesn't mess up anywhere that rv of kind chan is expected - } - - l = rv.Len() - if ti.mbs { + l := rv.Len() + if f.ti.mbs { if l%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", l) - return + encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) } - ee.WriteMapStart(l / 2) + f.ee.encodeMapPreamble(l / 2) } else { - ee.WriteArrayStart(l) + f.ee.encodeArrayPreamble(l) } - - if l > 0 { - var fn *codecFn - for rtelem.Kind() == reflect.Ptr { - rtelem = rtelem.Elem() - } - // if kind is reflect.Interface, do not pre-determine the - // encoding type, because preEncodeValue may break it down to - // a concrete type and kInterface will bomb. - if rtelem.Kind() != reflect.Interface { - fn = e.h.fn(rtelem, true, true) - } - for j := 0; j < l; j++ { - if elemsep { - if ti.mbs { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } else { - ee.WriteArrayElem() - } - } - e.encodeValue(rv.Index(j), fn, true) - } + if l == 0 { + return } - - if ti.mbs { - ee.WriteMapEnd() - } else { - ee.WriteArrayEnd() + for j := 0; j < l; j++ { + // TODO: Consider perf implication of encoding odd index values as symbols if type is string + f.e.encodeValue(rv.Index(j)) } } -func (e *Encoder) kStructNoOmitempty(f *codecFnInfo, rv reflect.Value) { - fti := f.ti - tisfi := fti.sfiSrc - toMap := !(fti.toArray || e.h.StructToArray) - if toMap { - tisfi = fti.sfiSort - } +func (f *encFnInfo) kArray(rv reflect.Value) { + // We cannot share kSlice method, because the array may be non-addressable. + // E.g. type struct S{B [2]byte}; Encode(S{}) will bomb on "panic: slice of unaddressable array". + // So we have to duplicate the functionality here. 
+ // f.e.encodeValue(rv.Slice(0, rv.Len())) + // f.kSlice(rv.Slice(0, rv.Len())) - ee := e.e - - sfn := structFieldNode{v: rv, update: false} - if toMap { - ee.WriteMapStart(len(tisfi)) - if e.esep { - for _, si := range tisfi { - ee.WriteMapElemKey() - e.kStructFieldKey(fti.keyType, si.encNameAsciiAlphaNum, si.encName) - ee.WriteMapElemValue() - e.encodeValue(sfn.field(si), nil, true) - } - } else { - for _, si := range tisfi { - e.kStructFieldKey(fti.keyType, si.encNameAsciiAlphaNum, si.encName) - e.encodeValue(sfn.field(si), nil, true) - } + l := rv.Len() + // Handle an array of bytes specially (in line with what is done for slices) + if f.ti.rt.Elem().Kind() == reflect.Uint8 { + if l == 0 { + f.ee.encodeStringBytes(c_RAW, nil) + return } - ee.WriteMapEnd() - } else { - ee.WriteArrayStart(len(tisfi)) - if e.esep { - for _, si := range tisfi { - ee.WriteArrayElem() - e.encodeValue(sfn.field(si), nil, true) - } + var bs []byte + if rv.CanAddr() { + bs = rv.Slice(0, l).Bytes() } else { - for _, si := range tisfi { - e.encodeValue(sfn.field(si), nil, true) + bs = make([]byte, l) + for i := 0; i < l; i++ { + bs[i] = byte(rv.Index(i).Uint()) } } - ee.WriteArrayEnd() + f.ee.encodeStringBytes(c_RAW, bs) + return } -} -func (e *Encoder) kStructFieldKey(keyType valueType, encNameAsciiAlphaNum bool, encName string) { - encStructFieldKey(encName, e.e, e.w, keyType, encNameAsciiAlphaNum, e.js) + if f.ti.mbs { + if l%2 == 1 { + encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) + } + f.ee.encodeMapPreamble(l / 2) + } else { + f.ee.encodeArrayPreamble(l) + } + if l == 0 { + return + } + for j := 0; j < l; j++ { + // TODO: Consider perf implication of encoding odd index values as symbols if type is string + f.e.encodeValue(rv.Index(j)) + } } -func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) { +func (f *encFnInfo) kStruct(rv reflect.Value) { fti := f.ti - elemsep := e.esep - tisfi := fti.sfiSrc - var newlen int + newlen := len(fti.sfi) + rvals := make([]reflect.Value, newlen) + var encnames []string + e := f.e + tisfi := fti.sfip toMap := !(fti.toArray || e.h.StructToArray) - var mf map[string]interface{} - if f.ti.mf { - mf = rv2i(rv).(MissingFielder).CodecMissingFields() - toMap = true - newlen += len(mf) - } else if f.ti.mfp { - if rv.CanAddr() { - mf = rv2i(rv.Addr()).(MissingFielder).CodecMissingFields() - } else { - // make a new addressable value of same one, and use it - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - mf = rv2i(rv2).(MissingFielder).CodecMissingFields() - } - toMap = true - newlen += len(mf) - } // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct) if toMap { - tisfi = fti.sfiSort + tisfi = fti.sfi + encnames = make([]string, newlen) } - newlen += len(tisfi) - ee := e.e - - // Use sync.Pool to reduce allocating slices unnecessarily. - // The cost of sync.Pool is less than the cost of new allocation. - // - // Each element of the array pools one of encStructPool(8|16|32|64). - // It allows the re-use of slices up to 64 in length. - // A performance cost of encoding structs was collecting - // which values were empty and should be omitted. - // We needed slices of reflect.Value and string to collect them. - // This shared pool reduces the amount of unnecessary creation we do. - // The cost is that of locking sometimes, but sync.Pool is efficient - // enough to reduce thread contention. 
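// Illustrative sketch only (not part of the vendored upstream source): the
// sync.Pool pattern described in the comment above, where scratch slices used
// while encoding a struct are pooled and reused instead of being allocated on
// every call. All names below are hypothetical.
package main

import (
	"fmt"
	"reflect"
	"sync"
)

var fieldScratch = sync.Pool{
	// New allocates a fresh scratch slice when the pool is empty.
	New: func() interface{} { return make([]reflect.Value, 0, 64) },
}

func encodeFields(rv reflect.Value) {
	s := fieldScratch.Get().([]reflect.Value)[:0]
	for i := 0; i < rv.NumField(); i++ {
		s = append(s, rv.Field(i)) // collect field values without a per-call allocation
	}
	fmt.Println("fields collected:", len(s))
	fieldScratch.Put(s) // return the (possibly grown) slice for reuse; occasional lock cost beats re-allocating
}

func main() {
	encodeFields(reflect.ValueOf(struct{ A, B int }{1, 2}))
}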
- - // fmt.Printf(">>>>>>>>>>>>>> encode.kStruct: newlen: %d\n", newlen) - var spool sfiRvPooler - var fkvs = spool.get(newlen) - - var kv sfiRv - recur := e.h.RecursiveEmptyCheck - sfn := structFieldNode{v: rv, update: false} newlen = 0 for _, si := range tisfi { - // kv.r = si.field(rv, false) - kv.r = sfn.field(si) + if si.i != -1 { + rvals[newlen] = rv.Field(int(si.i)) + } else { + rvals[newlen] = rv.FieldByIndex(si.is) + } if toMap { - if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) { + if si.omitEmpty && isEmptyValue(rvals[newlen]) { continue } - kv.v = si // si.encName + encnames[newlen] = si.encName } else { - // use the zero value. - // if a reference or struct, set to nil (so you do not output too much) - if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) { - switch kv.r.Kind() { - case reflect.Struct, reflect.Interface, reflect.Ptr, - reflect.Array, reflect.Map, reflect.Slice: - kv.r = reflect.Value{} //encode as nil - } + if si.omitEmpty && isEmptyValue(rvals[newlen]) { + rvals[newlen] = reflect.Value{} //encode as nil } } - fkvs[newlen] = kv newlen++ } - fkvs = fkvs[:newlen] - - var mflen int - for k, v := range mf { - if k == "" { - delete(mf, k) - continue - } - if fti.infoFieldOmitempty && isEmptyValue(reflect.ValueOf(v), e.h.TypeInfos, recur, recur) { - delete(mf, k) - continue - } - mflen++ - } - var j int + // debugf(">>>> kStruct: newlen: %v", newlen) if toMap { - ee.WriteMapStart(newlen + mflen) - if elemsep { - for j = 0; j < len(fkvs); j++ { - kv = fkvs[j] - ee.WriteMapElemKey() - e.kStructFieldKey(fti.keyType, kv.v.encNameAsciiAlphaNum, kv.v.encName) - ee.WriteMapElemValue() - e.encodeValue(kv.r, nil, true) - } - } else { - for j = 0; j < len(fkvs); j++ { - kv = fkvs[j] - e.kStructFieldKey(fti.keyType, kv.v.encNameAsciiAlphaNum, kv.v.encName) - e.encodeValue(kv.r, nil, true) + ee := f.ee //don't dereference everytime + ee.encodeMapPreamble(newlen) + // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + for j := 0; j < newlen; j++ { + if asSymbols { + ee.encodeSymbol(encnames[j]) + } else { + ee.encodeString(c_UTF8, encnames[j]) } + e.encodeValue(rvals[j]) } - // now, add the others - for k, v := range mf { - ee.WriteMapElemKey() - e.kStructFieldKey(fti.keyType, false, k) - ee.WriteMapElemValue() - e.encode(v) - } - ee.WriteMapEnd() } else { - ee.WriteArrayStart(newlen) - if elemsep { - for j = 0; j < len(fkvs); j++ { - ee.WriteArrayElem() - e.encodeValue(fkvs[j].r, nil, true) - } - } else { - for j = 0; j < len(fkvs); j++ { - e.encodeValue(fkvs[j].r, nil, true) - } + f.ee.encodeArrayPreamble(newlen) + for j := 0; j < newlen; j++ { + e.encodeValue(rvals[j]) } - ee.WriteArrayEnd() } - - // do not use defer. Instead, use explicit pool return at end of function. - // defer has a cost we are trying to avoid. - // If there is a panic and these slices are not returned, it is ok. - spool.end() } -func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) { - ee := e.e +// func (f *encFnInfo) kPtr(rv reflect.Value) { +// debugf(">>>>>>> ??? 
encode kPtr called - shouldn't get called") +// if rv.IsNil() { +// f.ee.encodeNil() +// return +// } +// f.e.encodeValue(rv.Elem()) +// } + +func (f *encFnInfo) kInterface(rv reflect.Value) { if rv.IsNil() { - ee.EncodeNil() + f.ee.encodeNil() return } + f.e.encodeValue(rv.Elem()) +} - l := rv.Len() - ee.WriteMapStart(l) - if l == 0 { - ee.WriteMapEnd() +func (f *encFnInfo) kMap(rv reflect.Value) { + if rv.IsNil() { + f.ee.encodeNil() return } - // var asSymbols bool - // determine the underlying key and val encFn's for the map. - // This eliminates some work which is done for each loop iteration i.e. - // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn. - // - // However, if kind is reflect.Interface, do not pre-determine the - // encoding type, because preEncodeValue may break it down to - // a concrete type and kInterface will bomb. - var keyFn, valFn *codecFn - ti := f.ti - rtkey0 := ti.key - rtkey := rtkey0 - rtval0 := ti.elem - rtval := rtval0 - // rtkeyid := rt2id(rtkey0) - for rtval.Kind() == reflect.Ptr { - rtval = rtval.Elem() - } - if rtval.Kind() != reflect.Interface { - valFn = e.h.fn(rtval, true, true) + + if shortCircuitReflectToFastPath { + switch f.ti.rtid { + case mapIntfIntfTypId: + f.e.encMapIntfIntf(rv.Interface().(map[interface{}]interface{})) + return + case mapStrIntfTypId: + f.e.encMapStrIntf(rv.Interface().(map[string]interface{})) + return + case mapStrStrTypId: + f.e.encMapStrStr(rv.Interface().(map[string]string)) + return + case mapInt64IntfTypId: + f.e.encMapInt64Intf(rv.Interface().(map[int64]interface{})) + return + case mapUint64IntfTypId: + f.e.encMapUint64Intf(rv.Interface().(map[uint64]interface{})) + return + } } - mks := rv.MapKeys() - if e.h.Canonical { - e.kMapCanonical(rtkey, rv, mks, valFn) - ee.WriteMapEnd() + l := rv.Len() + f.ee.encodeMapPreamble(l) + if l == 0 { return } - - var keyTypeIsString = stringTypId == rt2id(rtkey0) // rtkeyid - if !keyTypeIsString { - for rtkey.Kind() == reflect.Ptr { - rtkey = rtkey.Elem() - } - if rtkey.Kind() != reflect.Interface { - // rtkeyid = rt2id(rtkey) - keyFn = e.h.fn(rtkey, true, true) - } + // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String + keyTypeIsString := f.ti.rt.Key() == stringTyp + var asSymbols bool + if keyTypeIsString { + asSymbols = f.e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 } - + mks := rv.MapKeys() // for j, lmks := 0, len(mks); j < lmks; j++ { for j := range mks { - if e.esep { - ee.WriteMapElemKey() - } if keyTypeIsString { - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(mks[j].String())) + if asSymbols { + f.ee.encodeSymbol(mks[j].String()) } else { - ee.EncodeStringEnc(cUTF8, mks[j].String()) + f.ee.encodeString(c_UTF8, mks[j].String()) } } else { - e.encodeValue(mks[j], keyFn, true) - } - if e.esep { - ee.WriteMapElemValue() + f.e.encodeValue(mks[j]) } - e.encodeValue(rv.MapIndex(mks[j]), valFn, true) - + f.e.encodeValue(rv.MapIndex(mks[j])) } - ee.WriteMapEnd() -} -func (e *Encoder) kMapCanonical(rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *codecFn) { - ee := e.e - elemsep := e.esep - // we previously did out-of-band if an extension was registered. - // This is not necessary, as the natural kind is sufficient for ordering. 
- - switch rtkey.Kind() { - case reflect.Bool: - mksv := make([]boolRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Bool() - } - sort.Sort(boolRvSlice(mksv)) - for i := range mksv { - if elemsep { - ee.WriteMapElemKey() - } - ee.EncodeBool(mksv[i].v) - if elemsep { - ee.WriteMapElemValue() - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) - } - case reflect.String: - mksv := make([]stringRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.String() - } - sort.Sort(stringRvSlice(mksv)) - for i := range mksv { - if elemsep { - ee.WriteMapElemKey() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(mksv[i].v)) - } else { - ee.EncodeStringEnc(cUTF8, mksv[i].v) - } - if elemsep { - ee.WriteMapElemValue() - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) - } - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: - mksv := make([]uintRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Uint() - } - sort.Sort(uintRvSlice(mksv)) - for i := range mksv { - if elemsep { - ee.WriteMapElemKey() - } - ee.EncodeUint(mksv[i].v) - if elemsep { - ee.WriteMapElemValue() - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) - } - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - mksv := make([]intRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Int() - } - sort.Sort(intRvSlice(mksv)) - for i := range mksv { - if elemsep { - ee.WriteMapElemKey() - } - ee.EncodeInt(mksv[i].v) - if elemsep { - ee.WriteMapElemValue() - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) - } - case reflect.Float32: - mksv := make([]floatRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Float() - } - sort.Sort(floatRvSlice(mksv)) - for i := range mksv { - if elemsep { - ee.WriteMapElemKey() - } - ee.EncodeFloat32(float32(mksv[i].v)) - if elemsep { - ee.WriteMapElemValue() - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) - } - case reflect.Float64: - mksv := make([]floatRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Float() - } - sort.Sort(floatRvSlice(mksv)) - for i := range mksv { - if elemsep { - ee.WriteMapElemKey() - } - ee.EncodeFloat64(mksv[i].v) - if elemsep { - ee.WriteMapElemValue() - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) - } - case reflect.Struct: - if rv.Type() == timeTyp { - mksv := make([]timeRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = rv2i(k).(time.Time) - } - sort.Sort(timeRvSlice(mksv)) - for i := range mksv { - if elemsep { - ee.WriteMapElemKey() - } - ee.EncodeTime(mksv[i].v) - if elemsep { - ee.WriteMapElemValue() - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) - } - break - } - fallthrough - default: - // out-of-band - // first encode each key to a []byte first, then sort them, then record - var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - mksbv := make([]bytesRv, len(mks)) - for i, k := range mks { - v := &mksbv[i] - l := len(mksv) - e2.MustEncode(k) - v.r = k - v.v = mksv[l:] - } - sort.Sort(bytesRvSlice(mksbv)) - for j := range mksbv { - if elemsep { - ee.WriteMapElemKey() - } - e.asis(mksbv[j].v) - if elemsep { - ee.WriteMapElemValue() - } - e.encodeValue(rv.MapIndex(mksbv[j].r), valFn, true) - } - } } -// // -------------------------------------------------- - -type encWriterSwitch struct { - // wi *ioEncWriter 
- wb bytesEncAppender - wf *bufioEncWriter - // typ entryType - bytes bool // encoding to []byte - esep bool // whether it has elem separators - isas bool // whether e.as != nil - js bool // is json encoder? - be bool // is binary encoder? - _ [2]byte // padding - // _ [2]uint64 // padding - // _ uint64 // padding -} +// -------------------------------------------------- -func (z *encWriterSwitch) writeb(s []byte) { - if z.bytes { - z.wb.writeb(s) - } else { - z.wf.writeb(s) - } -} -func (z *encWriterSwitch) writestr(s string) { - if z.bytes { - z.wb.writestr(s) - } else { - z.wf.writestr(s) - } -} -func (z *encWriterSwitch) writen1(b1 byte) { - if z.bytes { - z.wb.writen1(b1) - } else { - z.wf.writen1(b1) - } -} -func (z *encWriterSwitch) writen2(b1, b2 byte) { - if z.bytes { - z.wb.writen2(b1, b2) - } else { - z.wf.writen2(b1, b2) - } -} -func (z *encWriterSwitch) endErr() error { - if z.bytes { - return z.wb.endErr() - } - return z.wf.endErr() +// encFn encapsulates the captured variables and the encode function. +// This way, we only do some calculations one times, and pass to the +// code block that should be called (encapsulated in a function) +// instead of executing the checks every time. +type encFn struct { + i *encFnInfo + f func(*encFnInfo, reflect.Value) } -func (z *encWriterSwitch) end() { - if err := z.endErr(); err != nil { - panic(err) - } -} +// -------------------------------------------------- -/* - -// ------------------------------------------ -func (z *encWriterSwitch) writeb(s []byte) { - switch z.typ { - case entryTypeBytes: - z.wb.writeb(s) - case entryTypeIo: - z.wi.writeb(s) - default: - z.wf.writeb(s) - } -} -func (z *encWriterSwitch) writestr(s string) { - switch z.typ { - case entryTypeBytes: - z.wb.writestr(s) - case entryTypeIo: - z.wi.writestr(s) - default: - z.wf.writestr(s) - } -} -func (z *encWriterSwitch) writen1(b1 byte) { - switch z.typ { - case entryTypeBytes: - z.wb.writen1(b1) - case entryTypeIo: - z.wi.writen1(b1) - default: - z.wf.writen1(b1) - } -} -func (z *encWriterSwitch) writen2(b1, b2 byte) { - switch z.typ { - case entryTypeBytes: - z.wb.writen2(b1, b2) - case entryTypeIo: - z.wi.writen2(b1, b2) - default: - z.wf.writen2(b1, b2) - } -} -func (z *encWriterSwitch) end() { - switch z.typ { - case entryTypeBytes: - z.wb.end() - case entryTypeIo: - z.wi.end() - default: - z.wf.end() - } -} - -// ------------------------------------------ -func (z *encWriterSwitch) writeb(s []byte) { - if z.bytes { - z.wb.writeb(s) - } else { - z.wi.writeb(s) - } -} -func (z *encWriterSwitch) writestr(s string) { - if z.bytes { - z.wb.writestr(s) - } else { - z.wi.writestr(s) - } -} -func (z *encWriterSwitch) writen1(b1 byte) { - if z.bytes { - z.wb.writen1(b1) - } else { - z.wi.writen1(b1) - } -} -func (z *encWriterSwitch) writen2(b1, b2 byte) { - if z.bytes { - z.wb.writen2(b1, b2) - } else { - z.wi.writen2(b1, b2) - } -} -func (z *encWriterSwitch) end() { - if z.bytes { - z.wb.end() - } else { - z.wi.end() - } -} - -*/ - -// Encoder writes an object to an output stream in a supported format. -// -// Encoder is NOT safe for concurrent use i.e. a Encoder cannot be used -// concurrently in multiple goroutines. -// -// However, as Encoder could be allocation heavy to initialize, a Reset method is provided -// so its state can be reused to decode new input streams repeatedly. -// This is the idiomatic way to use. +// An Encoder writes an object to an output stream in the codec format. 
type Encoder struct { - panicHdl - // hopefully, reduce derefencing cost by laying the encWriter inside the Encoder - e encDriver - - // NOTE: Encoder shouldn't call it's write methods, - // as the handler MAY need to do some coordination. - w *encWriterSwitch - - // bw *bufio.Writer - as encDriverAsis - - err error - + w encWriter + e encDriver h *BasicHandle hh Handle - // ---- cpu cache line boundary? + 3 - encWriterSwitch - - ci set - - b [(5 * 8)]byte // for encoding chan or (non-addressable) [N]byte - - // ---- writable fields during execution --- *try* to keep in sep cache line - - // ---- cpu cache line boundary? - // b [scratchByteArrayLen]byte - // _ [cacheLineSize - scratchByteArrayLen]byte // padding - // b [cacheLineSize - (8 * 0)]byte // used for encoding a chan or (non-addressable) array of bytes + f map[uintptr]encFn + x []uintptr + s []encFn } // NewEncoder returns an Encoder for encoding into an io.Writer. // -// For efficiency, Users are encouraged to configure WriterBufferSize on the handle -// OR pass in a memory buffered writer (eg bufio.Writer, bytes.Buffer). +// For efficiency, Users are encouraged to pass in a memory buffered writer +// (eg bufio.Writer, bytes.Buffer). func NewEncoder(w io.Writer, h Handle) *Encoder { - e := newEncoder(h) - e.Reset(w) - return e + ww, ok := w.(ioEncWriterWriter) + if !ok { + sww := simpleIoEncWriterWriter{w: w} + sww.bw, _ = w.(io.ByteWriter) + sww.sw, _ = w.(ioEncStringWriter) + ww = &sww + //ww = bufio.NewWriterSize(w, defEncByteBufSize) + } + z := ioEncWriter{ + w: ww, + } + return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} } // NewEncoderBytes returns an encoder for encoding directly and efficiently @@ -1270,541 +629,373 @@ func NewEncoder(w io.Writer, h Handle) *Encoder { // It will potentially replace the output byte slice pointed to. // After encoding, the out parameter contains the encoded contents. func NewEncoderBytes(out *[]byte, h Handle) *Encoder { - e := newEncoder(h) - e.ResetBytes(out) - return e -} - -func newEncoder(h Handle) *Encoder { - e := &Encoder{h: basicHandle(h), err: errEncoderNotInitialized} - e.bytes = true - if useFinalizers { - runtime.SetFinalizer(e, (*Encoder).finalize) - // xdebugf(">>>> new(Encoder) with finalizer") - } - e.w = &e.encWriterSwitch - e.hh = h - e.esep = h.hasElemSeparators() - - return e -} - -func (e *Encoder) resetCommon() { - // e.w = &e.encWriterSwitch - if e.e == nil || e.hh.recreateEncDriver(e.e) { - e.e = e.hh.newEncDriver(e) - e.as, e.isas = e.e.(encDriverAsis) - // e.cr, _ = e.e.(containerStateRecv) - } - e.be = e.hh.isBinary() - _, e.js = e.hh.(*JsonHandle) - e.e.reset() - e.err = nil -} - -// Reset resets the Encoder with a new output stream. -// -// This accommodates using the state of the Encoder, -// where it has "cached" information about sub-engines. 
-func (e *Encoder) Reset(w io.Writer) { - if w == nil { - return - } - // var ok bool - e.bytes = false - if e.wf == nil { - e.wf = new(bufioEncWriter) - } - // e.typ = entryTypeUnset - // if e.h.WriterBufferSize > 0 { - // // bw := bufio.NewWriterSize(w, e.h.WriterBufferSize) - // // e.wi.bw = bw - // // e.wi.sw = bw - // // e.wi.fw = bw - // // e.wi.ww = bw - // if e.wf == nil { - // e.wf = new(bufioEncWriter) - // } - // e.wf.reset(w, e.h.WriterBufferSize) - // e.typ = entryTypeBufio - // } else { - // if e.wi == nil { - // e.wi = new(ioEncWriter) - // } - // e.wi.reset(w) - // e.typ = entryTypeIo - // } - e.wf.reset(w, e.h.WriterBufferSize) - // e.typ = entryTypeBufio - - // e.w = e.wi - e.resetCommon() -} - -// ResetBytes resets the Encoder with a new destination output []byte. -func (e *Encoder) ResetBytes(out *[]byte) { - if out == nil { - return - } - var in []byte = *out + in := *out if in == nil { in = make([]byte, defEncByteBufSize) } - e.bytes = true - // e.typ = entryTypeBytes - e.wb.reset(in, out) - // e.w = &e.wb - e.resetCommon() + z := bytesEncWriter{ + b: in, + out: out, + } + return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} } -// Encode writes an object into a stream. +// Encode writes an object into a stream in the codec format. // -// Encoding can be configured via the struct tag for the fields. -// The key (in the struct tags) that we look at is configurable. +// Encoding can be configured via the "codec" struct tag for the fields. // -// By default, we look up the "codec" key in the struct field's tags, -// and fall bak to the "json" key if "codec" is absent. -// That key in struct field's tag value is the key name, +// The "codec" key in struct field's tag value is the key name, // followed by an optional comma and options. // // To set an option on all fields (e.g. omitempty on all fields), you -// can create a field called _struct, and set flags on it. The options -// which can be set on _struct are: -// - omitempty: so all fields are omitted if empty -// - toarray: so struct is encoded as an array -// - int: so struct key names are encoded as signed integers (instead of strings) -// - uint: so struct key names are encoded as unsigned integers (instead of strings) -// - float: so struct key names are encoded as floats (instead of strings) -// More details on these below. +// can create a field called _struct, and set flags on it. // // Struct values "usually" encode as maps. Each exported struct field is encoded unless: -// - the field's tag is "-", OR -// - the field is empty (empty or the zero value) and its tag specifies the "omitempty" option. +// - the field's codec tag is "-", OR +// - the field is empty and its codec tag specifies the "omitempty" option. // // When encoding as a map, the first string in the tag (before the comma) // is the map key string to use when encoding. -// ... -// This key is typically encoded as a string. -// However, there are instances where the encoded stream has mapping keys encoded as numbers. -// For example, some cbor streams have keys as integer codes in the stream, but they should map -// to fields in a structured object. Consequently, a struct is the natural representation in code. -// For these, configure the struct to encode/decode the keys as numbers (instead of string). -// This is done with the int,uint or float option on the _struct field (see above). // // However, struct values may encode as arrays. 
This happens when: // - StructToArray Encode option is set, OR -// - the tag on the _struct field sets the "toarray" option -// Note that omitempty is ignored when encoding struct values as arrays, -// as an entry must be encoded for each field, to maintain its position. +// - the codec tag on the _struct field sets the "toarray" option // // Values with types that implement MapBySlice are encoded as stream maps. // // The empty values (for omitempty option) are false, 0, any nil pointer // or interface value, and any array, slice, map, or string of length zero. // -// Anonymous fields are encoded inline except: -// - the struct tag specifies a replacement name (first value) -// - the field is of an interface type +// Anonymous fields are encoded inline if no struct tag is present. +// Else they are encoded as regular fields. // // Examples: // -// // NOTE: 'json:' can be used as struct tag key, in place 'codec:' below. // type MyStruct struct { // _struct bool `codec:",omitempty"` //set omitempty for every field // Field1 string `codec:"-"` //skip this field // Field2 int `codec:"myName"` //Use key "myName" in encode stream // Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty. // Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty. -// io.Reader //use key "Reader". -// MyStruct `codec:"my1" //use key "my1". -// MyStruct //inline it // ... // } // // type MyStruct struct { -// _struct bool `codec:",toarray"` //encode struct as an array -// } -// -// type MyStruct struct { -// _struct bool `codec:",uint"` //encode struct with "unsigned integer" keys -// Field1 string `codec:"1"` //encode Field1 key using: EncodeInt(1) -// Field2 string `codec:"2"` //encode Field2 key using: EncodeInt(2) +// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field +// //and encode struct as an array // } // // The mode of encoding is based on the type of the value. When a value is seen: -// - If a Selfer, call its CodecEncodeSelf method // - If an extension is registered for it, call that extension function -// - If implements encoding.(Binary|Text|JSON)Marshaler, call Marshal(Binary|Text|JSON) method +// - If it implements BinaryMarshaler, call its MarshalBinary() (data []byte, err error) // - Else encode it based on its reflect.Kind // // Note that struct field names and keys in map[string]XXX will be treated as symbols. // Some formats support symbols (e.g. binc) and will properly encode the string // only once in the stream, and use a tag to refer to it thereafter. func (e *Encoder) Encode(v interface{}) (err error) { - // tried to use closure, as runtime optimizes defer with no params. - // This seemed to be causing weird issues (like circular reference found, unexpected panic, etc). - // Also, see https://github.com/golang/go/issues/14939#issuecomment-417836139 - // defer func() { e.deferred(&err) }() } - // { x, y := e, &err; defer func() { x.deferred(y) }() } - if e.err != nil { - return e.err - } - if recoverPanicToErr { - defer func() { - // if error occurred during encoding, return that error; - // else if error occurred on end'ing (i.e. during flush), return that error. - err = e.w.endErr() - x := recover() - if x == nil { - e.err = err - } else { - panicValToErr(e, x, &e.err) - err = e.err - } - }() - } - - // defer e.deferred(&err) - e.mustEncode(v) - return -} - -// MustEncode is like Encode, but panics if unable to Encode. -// This provides insight to the code location that triggered the error. 
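
The tag-driven encoding documented above can be exercised with a minimal sketch like the one below; the Person type, its field names, and the tag values are illustrative only, NewEncoderBytes and Encode are taken from the signatures shown in this diff, and MsgpackHandle is used here as the concrete Handle for the msgpack format.

	package main

	import (
		"fmt"

		"github.com/hashicorp/go-msgpack/codec"
	)

	// Person is a hypothetical type showing the "codec" struct tag
	// conventions described in the Encode documentation above.
	type Person struct {
		_struct bool   `codec:",omitempty"` // set omitempty for every field
		Name    string `codec:"name"`       // encoded under map key "name"
		Email   string `codec:"-"`          // never encoded
		Age     int    `codec:",omitempty"` // omitted when zero
	}

	func main() {
		var out []byte
		var mh codec.MsgpackHandle // concrete Handle implementation
		enc := codec.NewEncoderBytes(&out, &mh)
		if err := enc.Encode(Person{Name: "ada"}); err != nil {
			fmt.Println("encode error:", err)
			return
		}
		// Age is zero and Email is tagged "-", so only "name" is emitted.
		fmt.Printf("%d bytes encoded\n", len(out))
	}
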
-func (e *Encoder) MustEncode(v interface{}) { - if e.err != nil { - panic(e.err) - } - e.mustEncode(v) -} - -func (e *Encoder) mustEncode(v interface{}) { - if e.wf == nil { - e.encode(v) - e.e.atEndOfEncode() - e.w.end() - return - } - - if e.wf.buf == nil { - e.wf.buf = e.wf.bytesBufPooler.get(e.wf.sz) - } - e.wf.calls++ - + defer panicToErr(&err) e.encode(v) - - e.wf.calls-- - - if e.wf.calls == 0 { - e.e.atEndOfEncode() - e.w.end() - if !e.h.ExplicitRelease { - e.wf.release() - } - } -} - -// func (e *Encoder) deferred(err1 *error) { -// e.w.end() -// if recoverPanicToErr { -// if x := recover(); x != nil { -// panicValToErr(e, x, err1) -// panicValToErr(e, x, &e.err) -// } -// } -// } - -//go:noinline -- as it is run by finalizer -func (e *Encoder) finalize() { - // xdebugf("finalizing Encoder") - e.Release() -} - -// Release releases shared (pooled) resources. -// -// It is important to call Release() when done with an Encoder, so those resources -// are released instantly for use by subsequently created Encoders. -func (e *Encoder) Release() { - if e.wf != nil { - e.wf.release() - } + e.w.atEndOfEncode() + return } func (e *Encoder) encode(iv interface{}) { - // a switch with only concrete types can be optimized. - // consequently, we deal with nil and interfaces outside the switch. - - if iv == nil || definitelyNil(iv) { - e.e.EncodeNil() - return - } - switch v := iv.(type) { - // case nil: - // case Selfer: - case Raw: - e.rawBytes(v) + case nil: + e.e.encodeNil() + case reflect.Value: - e.encodeValue(v, nil, true) + e.encodeValue(v) case string: - if e.h.StringToRaw { - e.e.EncodeStringBytesRaw(bytesView(v)) - } else { - e.e.EncodeStringEnc(cUTF8, v) - } + e.e.encodeString(c_UTF8, v) case bool: - e.e.EncodeBool(v) + e.e.encodeBool(v) case int: - e.e.EncodeInt(int64(v)) + e.e.encodeInt(int64(v)) case int8: - e.e.EncodeInt(int64(v)) + e.e.encodeInt(int64(v)) case int16: - e.e.EncodeInt(int64(v)) + e.e.encodeInt(int64(v)) case int32: - e.e.EncodeInt(int64(v)) + e.e.encodeInt(int64(v)) case int64: - e.e.EncodeInt(v) + e.e.encodeInt(v) case uint: - e.e.EncodeUint(uint64(v)) + e.e.encodeUint(uint64(v)) case uint8: - e.e.EncodeUint(uint64(v)) + e.e.encodeUint(uint64(v)) case uint16: - e.e.EncodeUint(uint64(v)) + e.e.encodeUint(uint64(v)) case uint32: - e.e.EncodeUint(uint64(v)) + e.e.encodeUint(uint64(v)) case uint64: - e.e.EncodeUint(v) - case uintptr: - e.e.EncodeUint(uint64(v)) + e.e.encodeUint(v) case float32: - e.e.EncodeFloat32(v) + e.e.encodeFloat32(v) case float64: - e.e.EncodeFloat64(v) - case time.Time: - e.e.EncodeTime(v) + e.e.encodeFloat64(v) + + case []interface{}: + e.encSliceIntf(v) + case []string: + e.encSliceStr(v) + case []int64: + e.encSliceInt64(v) + case []uint64: + e.encSliceUint64(v) case []uint8: - e.e.EncodeStringBytesRaw(v) - - case *Raw: - e.rawBytes(*v) + e.e.encodeStringBytes(c_RAW, v) + + case map[interface{}]interface{}: + e.encMapIntfIntf(v) + case map[string]interface{}: + e.encMapStrIntf(v) + case map[string]string: + e.encMapStrStr(v) + case map[int64]interface{}: + e.encMapInt64Intf(v) + case map[uint64]interface{}: + e.encMapUint64Intf(v) case *string: - if e.h.StringToRaw { - e.e.EncodeStringBytesRaw(bytesView(*v)) - } else { - e.e.EncodeStringEnc(cUTF8, *v) - } + e.e.encodeString(c_UTF8, *v) case *bool: - e.e.EncodeBool(*v) + e.e.encodeBool(*v) case *int: - e.e.EncodeInt(int64(*v)) + e.e.encodeInt(int64(*v)) case *int8: - e.e.EncodeInt(int64(*v)) + e.e.encodeInt(int64(*v)) case *int16: - e.e.EncodeInt(int64(*v)) + e.e.encodeInt(int64(*v)) case 
*int32: - e.e.EncodeInt(int64(*v)) + e.e.encodeInt(int64(*v)) case *int64: - e.e.EncodeInt(*v) + e.e.encodeInt(*v) case *uint: - e.e.EncodeUint(uint64(*v)) + e.e.encodeUint(uint64(*v)) case *uint8: - e.e.EncodeUint(uint64(*v)) + e.e.encodeUint(uint64(*v)) case *uint16: - e.e.EncodeUint(uint64(*v)) + e.e.encodeUint(uint64(*v)) case *uint32: - e.e.EncodeUint(uint64(*v)) + e.e.encodeUint(uint64(*v)) case *uint64: - e.e.EncodeUint(*v) - case *uintptr: - e.e.EncodeUint(uint64(*v)) + e.e.encodeUint(*v) case *float32: - e.e.EncodeFloat32(*v) + e.e.encodeFloat32(*v) case *float64: - e.e.EncodeFloat64(*v) - case *time.Time: - e.e.EncodeTime(*v) - + e.e.encodeFloat64(*v) + + case *[]interface{}: + e.encSliceIntf(*v) + case *[]string: + e.encSliceStr(*v) + case *[]int64: + e.encSliceInt64(*v) + case *[]uint64: + e.encSliceUint64(*v) case *[]uint8: - e.e.EncodeStringBytesRaw(*v) + e.e.encodeStringBytes(c_RAW, *v) + + case *map[interface{}]interface{}: + e.encMapIntfIntf(*v) + case *map[string]interface{}: + e.encMapStrIntf(*v) + case *map[string]string: + e.encMapStrStr(*v) + case *map[int64]interface{}: + e.encMapInt64Intf(*v) + case *map[uint64]interface{}: + e.encMapUint64Intf(*v) default: - if v, ok := iv.(Selfer); ok { - v.CodecEncodeSelf(e) - } else if !fastpathEncodeTypeSwitch(iv, e) { - // checkfastpath=true (not false), as underlying slice/map type may be fast-path - e.encodeValue(reflect.ValueOf(iv), nil, true) - } + e.encodeValue(reflect.ValueOf(iv)) } } -func (e *Encoder) encodeValue(rv reflect.Value, fn *codecFn, checkFastpath bool) { - // if a valid fn is passed, it MUST BE for the dereferenced type of rv - var sptr uintptr - var rvp reflect.Value - var rvpValid bool -TOP: - switch rv.Kind() { - case reflect.Ptr: - if rv.IsNil() { - e.e.EncodeNil() - return - } - rvpValid = true - rvp = rv - rv = rv.Elem() - if e.h.CheckCircularRef && rv.Kind() == reflect.Struct { - // TODO: Movable pointers will be an issue here. Future problem. 
- sptr = rv.UnsafeAddr() - break TOP - } - goto TOP - case reflect.Interface: +func (e *Encoder) encodeValue(rv reflect.Value) { + for rv.Kind() == reflect.Ptr { if rv.IsNil() { - e.e.EncodeNil() + e.e.encodeNil() return } rv = rv.Elem() - goto TOP - case reflect.Slice, reflect.Map: - if rv.IsNil() { - e.e.EncodeNil() - return - } - case reflect.Invalid, reflect.Func: - e.e.EncodeNil() - return } - if sptr != 0 && (&e.ci).add(sptr) { - e.errorf("circular reference found: # %d", sptr) - } + rt := rv.Type() + rtid := reflect.ValueOf(rt).Pointer() - if fn == nil { - rt := rv.Type() - // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer - fn = e.h.fn(rt, checkFastpath, true) + // if e.f == nil && e.s == nil { debugf("---->Creating new enc f map for type: %v\n", rt) } + var fn encFn + var ok bool + if useMapForCodecCache { + fn, ok = e.f[rtid] + } else { + for i, v := range e.x { + if v == rtid { + fn, ok = e.s[i], true + break + } + } } - if fn.i.addrE { - if rvpValid { - fn.fe(e, &fn.i, rvp) - } else if rv.CanAddr() { - fn.fe(e, &fn.i, rv.Addr()) + if !ok { + // debugf("\tCreating new enc fn for type: %v\n", rt) + fi := encFnInfo{ti: getTypeInfo(rtid, rt), e: e, ee: e.e} + fn.i = &fi + if rtid == rawExtTypId { + fn.f = (*encFnInfo).rawExt + } else if e.e.isBuiltinType(rtid) { + fn.f = (*encFnInfo).builtin + } else if xfTag, xfFn := e.h.getEncodeExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfTag, xfFn + fn.f = (*encFnInfo).ext + } else if supportBinaryMarshal && fi.ti.m { + fn.f = (*encFnInfo).binaryMarshal } else { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - fn.fe(e, &fn.i, rv2) + switch rk := rt.Kind(); rk { + case reflect.Bool: + fn.f = (*encFnInfo).kBool + case reflect.String: + fn.f = (*encFnInfo).kString + case reflect.Float64: + fn.f = (*encFnInfo).kFloat64 + case reflect.Float32: + fn.f = (*encFnInfo).kFloat32 + case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16: + fn.f = (*encFnInfo).kInt + case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16: + fn.f = (*encFnInfo).kUint + case reflect.Invalid: + fn.f = (*encFnInfo).kInvalid + case reflect.Slice: + fn.f = (*encFnInfo).kSlice + case reflect.Array: + fn.f = (*encFnInfo).kArray + case reflect.Struct: + fn.f = (*encFnInfo).kStruct + // case reflect.Ptr: + // fn.f = (*encFnInfo).kPtr + case reflect.Interface: + fn.f = (*encFnInfo).kInterface + case reflect.Map: + fn.f = (*encFnInfo).kMap + default: + fn.f = (*encFnInfo).kErr + } + } + if useMapForCodecCache { + if e.f == nil { + e.f = make(map[uintptr]encFn, 16) + } + e.f[rtid] = fn + } else { + e.s = append(e.s, fn) + e.x = append(e.x, rtid) } - } else { - fn.fe(e, &fn.i, rv) } - if sptr != 0 { - (&e.ci).remove(sptr) - } -} -// func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) { -// if fnerr != nil { -// panic(fnerr) -// } -// if bs == nil { -// e.e.EncodeNil() -// } else if asis { -// e.asis(bs) -// } else { -// e.e.EncodeStringBytesRaw(bs) -// } -// } + fn.f(fn.i, rv) -func (e *Encoder) marshalUtf8(bs []byte, fnerr error) { - if fnerr != nil { - panic(fnerr) +} + +func (e *Encoder) encRawExt(re RawExt) { + if re.Data == nil { + e.e.encodeNil() + return } - if bs == nil { - e.e.EncodeNil() + if e.hh.writeExt() { + e.e.encodeExtPreamble(re.Tag, len(re.Data)) + e.w.writeb(re.Data) } else { - e.e.EncodeStringEnc(cUTF8, stringView(bs)) + e.e.encodeStringBytes(c_RAW, re.Data) } } -func (e *Encoder) marshalAsis(bs []byte, fnerr error) { - if fnerr != nil { - 
panic(fnerr) +// --------------------------------------------- +// short circuit functions for common maps and slices + +func (e *Encoder) encSliceIntf(v []interface{}) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.encode(v2) } - if bs == nil { - e.e.EncodeNil() - } else { - e.asis(bs) +} + +func (e *Encoder) encSliceStr(v []string) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeString(c_UTF8, v2) } } -func (e *Encoder) marshalRaw(bs []byte, fnerr error) { - if fnerr != nil { - panic(fnerr) +func (e *Encoder) encSliceInt64(v []int64) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeInt(v2) } - if bs == nil { - e.e.EncodeNil() - } else { - e.e.EncodeStringBytesRaw(bs) +} + +func (e *Encoder) encSliceUint64(v []uint64) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeUint(v2) } } -func (e *Encoder) asis(v []byte) { - if e.isas { - e.as.EncodeAsis(v) - } else { - e.w.writeb(v) +func (e *Encoder) encMapStrStr(v map[string]string) { + e.e.encodeMapPreamble(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + for k2, v2 := range v { + if asSymbols { + e.e.encodeSymbol(k2) + } else { + e.e.encodeString(c_UTF8, k2) + } + e.e.encodeString(c_UTF8, v2) } } -func (e *Encoder) rawBytes(vv Raw) { - v := []byte(vv) - if !e.h.Raw { - e.errorf("Raw values cannot be encoded: %v", v) +func (e *Encoder) encMapStrIntf(v map[string]interface{}) { + e.e.encodeMapPreamble(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + for k2, v2 := range v { + if asSymbols { + e.e.encodeSymbol(k2) + } else { + e.e.encodeString(c_UTF8, k2) + } + e.encode(v2) } - e.asis(v) } -func (e *Encoder) wrapErr(v interface{}, err *error) { - *err = encodeError{codecError{name: e.hh.Name(), err: v}} +func (e *Encoder) encMapInt64Intf(v map[int64]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.e.encodeInt(k2) + e.encode(v2) + } } -func encStructFieldKey(encName string, ee encDriver, w *encWriterSwitch, - keyType valueType, encNameAsciiAlphaNum bool, js bool) { - var m must - // use if-else-if, not switch (which compiles to binary-search) - // since keyType is typically valueTypeString, branch prediction is pretty good. 
- if keyType == valueTypeString { - if js && encNameAsciiAlphaNum { // keyType == valueTypeString - // w.writen1('"') - // w.writestr(encName) - // w.writen1('"') - // ---- - // w.writestr(`"` + encName + `"`) - // ---- - // do concat myself, so it is faster than the generic string concat - b := make([]byte, len(encName)+2) - copy(b[1:], encName) - b[0] = '"' - b[len(b)-1] = '"' - w.writeb(b) - } else { // keyType == valueTypeString - ee.EncodeStringEnc(cUTF8, encName) - } - } else if keyType == valueTypeInt { - ee.EncodeInt(m.Int(strconv.ParseInt(encName, 10, 64))) - } else if keyType == valueTypeUint { - ee.EncodeUint(m.Uint(strconv.ParseUint(encName, 10, 64))) - } else if keyType == valueTypeFloat { - ee.EncodeFloat64(m.Float(strconv.ParseFloat(encName, 64))) +func (e *Encoder) encMapUint64Intf(v map[uint64]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.e.encodeUint(uint64(k2)) + e.encode(v2) } } -// func encStringAsRawBytesMaybe(ee encDriver, s string, stringToRaw bool) { -// if stringToRaw { -// ee.EncodeStringBytesRaw(bytesView(s)) -// } else { -// ee.EncodeStringEnc(cUTF8, s) -// } -// } +func (e *Encoder) encMapIntfIntf(v map[interface{}]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.encode(k2) + e.encode(v2) + } +} + +// ---------------------------------------- + +func encErr(format string, params ...interface{}) { + doPanic(msgTagEnc, format, params...) +} diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.generated.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.generated.go deleted file mode 100644 index 9bc14bd6..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.generated.go +++ /dev/null @@ -1,33668 +0,0 @@ -// +build !notfastpath - -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// Code generated from fast-path.go.tmpl - DO NOT EDIT. - -package codec - -// Fast path functions try to create a fast path encode or decode implementation -// for common maps and slices. -// -// We define the functions and register then in this single file -// so as not to pollute the encode.go and decode.go, and create a dependency in there. -// This file can be omitted without causing a build failure. -// -// The advantage of fast paths is: -// - Many calls bypass reflection altogether -// -// Currently support -// - slice of all builtin types, -// - map of all builtin types to string or interface value -// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) -// This should provide adequate "typical" implementations. -// -// Note that fast track decode functions must handle values for which an address cannot be obtained. -// For example: -// m2 := map[string]int{} -// p2 := []interface{}{m2} -// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. 
-// - -import ( - "reflect" - "sort" -) - -const fastpathEnabled = true - -const fastpathMapBySliceErrMsg = "mapBySlice requires even slice length, but got %v" - -type fastpathT struct{} - -var fastpathTV fastpathT - -type fastpathE struct { - rtid uintptr - rt reflect.Type - encfn func(*Encoder, *codecFnInfo, reflect.Value) - decfn func(*Decoder, *codecFnInfo, reflect.Value) -} - -type fastpathA [271]fastpathE - -func (x *fastpathA) index(rtid uintptr) int { - // use binary search to grab the index (adapted from sort/search.go) - // Note: we use goto (instead of for loop) so this can be inlined. - // h, i, j := 0, 0, len(x) - var h, i uint - var j = uint(len(x)) -LOOP: - if i < j { - h = i + (j-i)/2 - if x[h].rtid < rtid { - i = h + 1 - } else { - j = h - } - goto LOOP - } - if i < uint(len(x)) && x[i].rtid == rtid { - return int(i) - } - return -1 -} - -type fastpathAslice []fastpathE - -func (x fastpathAslice) Len() int { return len(x) } -func (x fastpathAslice) Less(i, j int) bool { return x[uint(i)].rtid < x[uint(j)].rtid } -func (x fastpathAslice) Swap(i, j int) { x[uint(i)], x[uint(j)] = x[uint(j)], x[uint(i)] } - -var fastpathAV fastpathA - -// due to possible initialization loop error, make fastpath in an init() -func init() { - var i uint = 0 - fn := func(v interface{}, - fe func(*Encoder, *codecFnInfo, reflect.Value), - fd func(*Decoder, *codecFnInfo, reflect.Value)) { - xrt := reflect.TypeOf(v) - xptr := rt2id(xrt) - fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} - i++ - } - - fn([]interface{}(nil), (*Encoder).fastpathEncSliceIntfR, (*Decoder).fastpathDecSliceIntfR) - fn([]string(nil), (*Encoder).fastpathEncSliceStringR, (*Decoder).fastpathDecSliceStringR) - fn([]float32(nil), (*Encoder).fastpathEncSliceFloat32R, (*Decoder).fastpathDecSliceFloat32R) - fn([]float64(nil), (*Encoder).fastpathEncSliceFloat64R, (*Decoder).fastpathDecSliceFloat64R) - fn([]uint(nil), (*Encoder).fastpathEncSliceUintR, (*Decoder).fastpathDecSliceUintR) - fn([]uint16(nil), (*Encoder).fastpathEncSliceUint16R, (*Decoder).fastpathDecSliceUint16R) - fn([]uint32(nil), (*Encoder).fastpathEncSliceUint32R, (*Decoder).fastpathDecSliceUint32R) - fn([]uint64(nil), (*Encoder).fastpathEncSliceUint64R, (*Decoder).fastpathDecSliceUint64R) - fn([]uintptr(nil), (*Encoder).fastpathEncSliceUintptrR, (*Decoder).fastpathDecSliceUintptrR) - fn([]int(nil), (*Encoder).fastpathEncSliceIntR, (*Decoder).fastpathDecSliceIntR) - fn([]int8(nil), (*Encoder).fastpathEncSliceInt8R, (*Decoder).fastpathDecSliceInt8R) - fn([]int16(nil), (*Encoder).fastpathEncSliceInt16R, (*Decoder).fastpathDecSliceInt16R) - fn([]int32(nil), (*Encoder).fastpathEncSliceInt32R, (*Decoder).fastpathDecSliceInt32R) - fn([]int64(nil), (*Encoder).fastpathEncSliceInt64R, (*Decoder).fastpathDecSliceInt64R) - fn([]bool(nil), (*Encoder).fastpathEncSliceBoolR, (*Decoder).fastpathDecSliceBoolR) - - fn(map[interface{}]interface{}(nil), (*Encoder).fastpathEncMapIntfIntfR, (*Decoder).fastpathDecMapIntfIntfR) - fn(map[interface{}]string(nil), (*Encoder).fastpathEncMapIntfStringR, (*Decoder).fastpathDecMapIntfStringR) - fn(map[interface{}]uint(nil), (*Encoder).fastpathEncMapIntfUintR, (*Decoder).fastpathDecMapIntfUintR) - fn(map[interface{}]uint8(nil), (*Encoder).fastpathEncMapIntfUint8R, (*Decoder).fastpathDecMapIntfUint8R) - fn(map[interface{}]uint16(nil), (*Encoder).fastpathEncMapIntfUint16R, (*Decoder).fastpathDecMapIntfUint16R) - fn(map[interface{}]uint32(nil), (*Encoder).fastpathEncMapIntfUint32R, (*Decoder).fastpathDecMapIntfUint32R) - fn(map[interface{}]uint64(nil), 
(*Encoder).fastpathEncMapIntfUint64R, (*Decoder).fastpathDecMapIntfUint64R) - fn(map[interface{}]uintptr(nil), (*Encoder).fastpathEncMapIntfUintptrR, (*Decoder).fastpathDecMapIntfUintptrR) - fn(map[interface{}]int(nil), (*Encoder).fastpathEncMapIntfIntR, (*Decoder).fastpathDecMapIntfIntR) - fn(map[interface{}]int8(nil), (*Encoder).fastpathEncMapIntfInt8R, (*Decoder).fastpathDecMapIntfInt8R) - fn(map[interface{}]int16(nil), (*Encoder).fastpathEncMapIntfInt16R, (*Decoder).fastpathDecMapIntfInt16R) - fn(map[interface{}]int32(nil), (*Encoder).fastpathEncMapIntfInt32R, (*Decoder).fastpathDecMapIntfInt32R) - fn(map[interface{}]int64(nil), (*Encoder).fastpathEncMapIntfInt64R, (*Decoder).fastpathDecMapIntfInt64R) - fn(map[interface{}]float32(nil), (*Encoder).fastpathEncMapIntfFloat32R, (*Decoder).fastpathDecMapIntfFloat32R) - fn(map[interface{}]float64(nil), (*Encoder).fastpathEncMapIntfFloat64R, (*Decoder).fastpathDecMapIntfFloat64R) - fn(map[interface{}]bool(nil), (*Encoder).fastpathEncMapIntfBoolR, (*Decoder).fastpathDecMapIntfBoolR) - fn(map[string]interface{}(nil), (*Encoder).fastpathEncMapStringIntfR, (*Decoder).fastpathDecMapStringIntfR) - fn(map[string]string(nil), (*Encoder).fastpathEncMapStringStringR, (*Decoder).fastpathDecMapStringStringR) - fn(map[string]uint(nil), (*Encoder).fastpathEncMapStringUintR, (*Decoder).fastpathDecMapStringUintR) - fn(map[string]uint8(nil), (*Encoder).fastpathEncMapStringUint8R, (*Decoder).fastpathDecMapStringUint8R) - fn(map[string]uint16(nil), (*Encoder).fastpathEncMapStringUint16R, (*Decoder).fastpathDecMapStringUint16R) - fn(map[string]uint32(nil), (*Encoder).fastpathEncMapStringUint32R, (*Decoder).fastpathDecMapStringUint32R) - fn(map[string]uint64(nil), (*Encoder).fastpathEncMapStringUint64R, (*Decoder).fastpathDecMapStringUint64R) - fn(map[string]uintptr(nil), (*Encoder).fastpathEncMapStringUintptrR, (*Decoder).fastpathDecMapStringUintptrR) - fn(map[string]int(nil), (*Encoder).fastpathEncMapStringIntR, (*Decoder).fastpathDecMapStringIntR) - fn(map[string]int8(nil), (*Encoder).fastpathEncMapStringInt8R, (*Decoder).fastpathDecMapStringInt8R) - fn(map[string]int16(nil), (*Encoder).fastpathEncMapStringInt16R, (*Decoder).fastpathDecMapStringInt16R) - fn(map[string]int32(nil), (*Encoder).fastpathEncMapStringInt32R, (*Decoder).fastpathDecMapStringInt32R) - fn(map[string]int64(nil), (*Encoder).fastpathEncMapStringInt64R, (*Decoder).fastpathDecMapStringInt64R) - fn(map[string]float32(nil), (*Encoder).fastpathEncMapStringFloat32R, (*Decoder).fastpathDecMapStringFloat32R) - fn(map[string]float64(nil), (*Encoder).fastpathEncMapStringFloat64R, (*Decoder).fastpathDecMapStringFloat64R) - fn(map[string]bool(nil), (*Encoder).fastpathEncMapStringBoolR, (*Decoder).fastpathDecMapStringBoolR) - fn(map[float32]interface{}(nil), (*Encoder).fastpathEncMapFloat32IntfR, (*Decoder).fastpathDecMapFloat32IntfR) - fn(map[float32]string(nil), (*Encoder).fastpathEncMapFloat32StringR, (*Decoder).fastpathDecMapFloat32StringR) - fn(map[float32]uint(nil), (*Encoder).fastpathEncMapFloat32UintR, (*Decoder).fastpathDecMapFloat32UintR) - fn(map[float32]uint8(nil), (*Encoder).fastpathEncMapFloat32Uint8R, (*Decoder).fastpathDecMapFloat32Uint8R) - fn(map[float32]uint16(nil), (*Encoder).fastpathEncMapFloat32Uint16R, (*Decoder).fastpathDecMapFloat32Uint16R) - fn(map[float32]uint32(nil), (*Encoder).fastpathEncMapFloat32Uint32R, (*Decoder).fastpathDecMapFloat32Uint32R) - fn(map[float32]uint64(nil), (*Encoder).fastpathEncMapFloat32Uint64R, (*Decoder).fastpathDecMapFloat32Uint64R) - 
fn(map[float32]uintptr(nil), (*Encoder).fastpathEncMapFloat32UintptrR, (*Decoder).fastpathDecMapFloat32UintptrR) - fn(map[float32]int(nil), (*Encoder).fastpathEncMapFloat32IntR, (*Decoder).fastpathDecMapFloat32IntR) - fn(map[float32]int8(nil), (*Encoder).fastpathEncMapFloat32Int8R, (*Decoder).fastpathDecMapFloat32Int8R) - fn(map[float32]int16(nil), (*Encoder).fastpathEncMapFloat32Int16R, (*Decoder).fastpathDecMapFloat32Int16R) - fn(map[float32]int32(nil), (*Encoder).fastpathEncMapFloat32Int32R, (*Decoder).fastpathDecMapFloat32Int32R) - fn(map[float32]int64(nil), (*Encoder).fastpathEncMapFloat32Int64R, (*Decoder).fastpathDecMapFloat32Int64R) - fn(map[float32]float32(nil), (*Encoder).fastpathEncMapFloat32Float32R, (*Decoder).fastpathDecMapFloat32Float32R) - fn(map[float32]float64(nil), (*Encoder).fastpathEncMapFloat32Float64R, (*Decoder).fastpathDecMapFloat32Float64R) - fn(map[float32]bool(nil), (*Encoder).fastpathEncMapFloat32BoolR, (*Decoder).fastpathDecMapFloat32BoolR) - fn(map[float64]interface{}(nil), (*Encoder).fastpathEncMapFloat64IntfR, (*Decoder).fastpathDecMapFloat64IntfR) - fn(map[float64]string(nil), (*Encoder).fastpathEncMapFloat64StringR, (*Decoder).fastpathDecMapFloat64StringR) - fn(map[float64]uint(nil), (*Encoder).fastpathEncMapFloat64UintR, (*Decoder).fastpathDecMapFloat64UintR) - fn(map[float64]uint8(nil), (*Encoder).fastpathEncMapFloat64Uint8R, (*Decoder).fastpathDecMapFloat64Uint8R) - fn(map[float64]uint16(nil), (*Encoder).fastpathEncMapFloat64Uint16R, (*Decoder).fastpathDecMapFloat64Uint16R) - fn(map[float64]uint32(nil), (*Encoder).fastpathEncMapFloat64Uint32R, (*Decoder).fastpathDecMapFloat64Uint32R) - fn(map[float64]uint64(nil), (*Encoder).fastpathEncMapFloat64Uint64R, (*Decoder).fastpathDecMapFloat64Uint64R) - fn(map[float64]uintptr(nil), (*Encoder).fastpathEncMapFloat64UintptrR, (*Decoder).fastpathDecMapFloat64UintptrR) - fn(map[float64]int(nil), (*Encoder).fastpathEncMapFloat64IntR, (*Decoder).fastpathDecMapFloat64IntR) - fn(map[float64]int8(nil), (*Encoder).fastpathEncMapFloat64Int8R, (*Decoder).fastpathDecMapFloat64Int8R) - fn(map[float64]int16(nil), (*Encoder).fastpathEncMapFloat64Int16R, (*Decoder).fastpathDecMapFloat64Int16R) - fn(map[float64]int32(nil), (*Encoder).fastpathEncMapFloat64Int32R, (*Decoder).fastpathDecMapFloat64Int32R) - fn(map[float64]int64(nil), (*Encoder).fastpathEncMapFloat64Int64R, (*Decoder).fastpathDecMapFloat64Int64R) - fn(map[float64]float32(nil), (*Encoder).fastpathEncMapFloat64Float32R, (*Decoder).fastpathDecMapFloat64Float32R) - fn(map[float64]float64(nil), (*Encoder).fastpathEncMapFloat64Float64R, (*Decoder).fastpathDecMapFloat64Float64R) - fn(map[float64]bool(nil), (*Encoder).fastpathEncMapFloat64BoolR, (*Decoder).fastpathDecMapFloat64BoolR) - fn(map[uint]interface{}(nil), (*Encoder).fastpathEncMapUintIntfR, (*Decoder).fastpathDecMapUintIntfR) - fn(map[uint]string(nil), (*Encoder).fastpathEncMapUintStringR, (*Decoder).fastpathDecMapUintStringR) - fn(map[uint]uint(nil), (*Encoder).fastpathEncMapUintUintR, (*Decoder).fastpathDecMapUintUintR) - fn(map[uint]uint8(nil), (*Encoder).fastpathEncMapUintUint8R, (*Decoder).fastpathDecMapUintUint8R) - fn(map[uint]uint16(nil), (*Encoder).fastpathEncMapUintUint16R, (*Decoder).fastpathDecMapUintUint16R) - fn(map[uint]uint32(nil), (*Encoder).fastpathEncMapUintUint32R, (*Decoder).fastpathDecMapUintUint32R) - fn(map[uint]uint64(nil), (*Encoder).fastpathEncMapUintUint64R, (*Decoder).fastpathDecMapUintUint64R) - fn(map[uint]uintptr(nil), (*Encoder).fastpathEncMapUintUintptrR, 
(*Decoder).fastpathDecMapUintUintptrR) - fn(map[uint]int(nil), (*Encoder).fastpathEncMapUintIntR, (*Decoder).fastpathDecMapUintIntR) - fn(map[uint]int8(nil), (*Encoder).fastpathEncMapUintInt8R, (*Decoder).fastpathDecMapUintInt8R) - fn(map[uint]int16(nil), (*Encoder).fastpathEncMapUintInt16R, (*Decoder).fastpathDecMapUintInt16R) - fn(map[uint]int32(nil), (*Encoder).fastpathEncMapUintInt32R, (*Decoder).fastpathDecMapUintInt32R) - fn(map[uint]int64(nil), (*Encoder).fastpathEncMapUintInt64R, (*Decoder).fastpathDecMapUintInt64R) - fn(map[uint]float32(nil), (*Encoder).fastpathEncMapUintFloat32R, (*Decoder).fastpathDecMapUintFloat32R) - fn(map[uint]float64(nil), (*Encoder).fastpathEncMapUintFloat64R, (*Decoder).fastpathDecMapUintFloat64R) - fn(map[uint]bool(nil), (*Encoder).fastpathEncMapUintBoolR, (*Decoder).fastpathDecMapUintBoolR) - fn(map[uint8]interface{}(nil), (*Encoder).fastpathEncMapUint8IntfR, (*Decoder).fastpathDecMapUint8IntfR) - fn(map[uint8]string(nil), (*Encoder).fastpathEncMapUint8StringR, (*Decoder).fastpathDecMapUint8StringR) - fn(map[uint8]uint(nil), (*Encoder).fastpathEncMapUint8UintR, (*Decoder).fastpathDecMapUint8UintR) - fn(map[uint8]uint8(nil), (*Encoder).fastpathEncMapUint8Uint8R, (*Decoder).fastpathDecMapUint8Uint8R) - fn(map[uint8]uint16(nil), (*Encoder).fastpathEncMapUint8Uint16R, (*Decoder).fastpathDecMapUint8Uint16R) - fn(map[uint8]uint32(nil), (*Encoder).fastpathEncMapUint8Uint32R, (*Decoder).fastpathDecMapUint8Uint32R) - fn(map[uint8]uint64(nil), (*Encoder).fastpathEncMapUint8Uint64R, (*Decoder).fastpathDecMapUint8Uint64R) - fn(map[uint8]uintptr(nil), (*Encoder).fastpathEncMapUint8UintptrR, (*Decoder).fastpathDecMapUint8UintptrR) - fn(map[uint8]int(nil), (*Encoder).fastpathEncMapUint8IntR, (*Decoder).fastpathDecMapUint8IntR) - fn(map[uint8]int8(nil), (*Encoder).fastpathEncMapUint8Int8R, (*Decoder).fastpathDecMapUint8Int8R) - fn(map[uint8]int16(nil), (*Encoder).fastpathEncMapUint8Int16R, (*Decoder).fastpathDecMapUint8Int16R) - fn(map[uint8]int32(nil), (*Encoder).fastpathEncMapUint8Int32R, (*Decoder).fastpathDecMapUint8Int32R) - fn(map[uint8]int64(nil), (*Encoder).fastpathEncMapUint8Int64R, (*Decoder).fastpathDecMapUint8Int64R) - fn(map[uint8]float32(nil), (*Encoder).fastpathEncMapUint8Float32R, (*Decoder).fastpathDecMapUint8Float32R) - fn(map[uint8]float64(nil), (*Encoder).fastpathEncMapUint8Float64R, (*Decoder).fastpathDecMapUint8Float64R) - fn(map[uint8]bool(nil), (*Encoder).fastpathEncMapUint8BoolR, (*Decoder).fastpathDecMapUint8BoolR) - fn(map[uint16]interface{}(nil), (*Encoder).fastpathEncMapUint16IntfR, (*Decoder).fastpathDecMapUint16IntfR) - fn(map[uint16]string(nil), (*Encoder).fastpathEncMapUint16StringR, (*Decoder).fastpathDecMapUint16StringR) - fn(map[uint16]uint(nil), (*Encoder).fastpathEncMapUint16UintR, (*Decoder).fastpathDecMapUint16UintR) - fn(map[uint16]uint8(nil), (*Encoder).fastpathEncMapUint16Uint8R, (*Decoder).fastpathDecMapUint16Uint8R) - fn(map[uint16]uint16(nil), (*Encoder).fastpathEncMapUint16Uint16R, (*Decoder).fastpathDecMapUint16Uint16R) - fn(map[uint16]uint32(nil), (*Encoder).fastpathEncMapUint16Uint32R, (*Decoder).fastpathDecMapUint16Uint32R) - fn(map[uint16]uint64(nil), (*Encoder).fastpathEncMapUint16Uint64R, (*Decoder).fastpathDecMapUint16Uint64R) - fn(map[uint16]uintptr(nil), (*Encoder).fastpathEncMapUint16UintptrR, (*Decoder).fastpathDecMapUint16UintptrR) - fn(map[uint16]int(nil), (*Encoder).fastpathEncMapUint16IntR, (*Decoder).fastpathDecMapUint16IntR) - fn(map[uint16]int8(nil), (*Encoder).fastpathEncMapUint16Int8R, 
(*Decoder).fastpathDecMapUint16Int8R) - fn(map[uint16]int16(nil), (*Encoder).fastpathEncMapUint16Int16R, (*Decoder).fastpathDecMapUint16Int16R) - fn(map[uint16]int32(nil), (*Encoder).fastpathEncMapUint16Int32R, (*Decoder).fastpathDecMapUint16Int32R) - fn(map[uint16]int64(nil), (*Encoder).fastpathEncMapUint16Int64R, (*Decoder).fastpathDecMapUint16Int64R) - fn(map[uint16]float32(nil), (*Encoder).fastpathEncMapUint16Float32R, (*Decoder).fastpathDecMapUint16Float32R) - fn(map[uint16]float64(nil), (*Encoder).fastpathEncMapUint16Float64R, (*Decoder).fastpathDecMapUint16Float64R) - fn(map[uint16]bool(nil), (*Encoder).fastpathEncMapUint16BoolR, (*Decoder).fastpathDecMapUint16BoolR) - fn(map[uint32]interface{}(nil), (*Encoder).fastpathEncMapUint32IntfR, (*Decoder).fastpathDecMapUint32IntfR) - fn(map[uint32]string(nil), (*Encoder).fastpathEncMapUint32StringR, (*Decoder).fastpathDecMapUint32StringR) - fn(map[uint32]uint(nil), (*Encoder).fastpathEncMapUint32UintR, (*Decoder).fastpathDecMapUint32UintR) - fn(map[uint32]uint8(nil), (*Encoder).fastpathEncMapUint32Uint8R, (*Decoder).fastpathDecMapUint32Uint8R) - fn(map[uint32]uint16(nil), (*Encoder).fastpathEncMapUint32Uint16R, (*Decoder).fastpathDecMapUint32Uint16R) - fn(map[uint32]uint32(nil), (*Encoder).fastpathEncMapUint32Uint32R, (*Decoder).fastpathDecMapUint32Uint32R) - fn(map[uint32]uint64(nil), (*Encoder).fastpathEncMapUint32Uint64R, (*Decoder).fastpathDecMapUint32Uint64R) - fn(map[uint32]uintptr(nil), (*Encoder).fastpathEncMapUint32UintptrR, (*Decoder).fastpathDecMapUint32UintptrR) - fn(map[uint32]int(nil), (*Encoder).fastpathEncMapUint32IntR, (*Decoder).fastpathDecMapUint32IntR) - fn(map[uint32]int8(nil), (*Encoder).fastpathEncMapUint32Int8R, (*Decoder).fastpathDecMapUint32Int8R) - fn(map[uint32]int16(nil), (*Encoder).fastpathEncMapUint32Int16R, (*Decoder).fastpathDecMapUint32Int16R) - fn(map[uint32]int32(nil), (*Encoder).fastpathEncMapUint32Int32R, (*Decoder).fastpathDecMapUint32Int32R) - fn(map[uint32]int64(nil), (*Encoder).fastpathEncMapUint32Int64R, (*Decoder).fastpathDecMapUint32Int64R) - fn(map[uint32]float32(nil), (*Encoder).fastpathEncMapUint32Float32R, (*Decoder).fastpathDecMapUint32Float32R) - fn(map[uint32]float64(nil), (*Encoder).fastpathEncMapUint32Float64R, (*Decoder).fastpathDecMapUint32Float64R) - fn(map[uint32]bool(nil), (*Encoder).fastpathEncMapUint32BoolR, (*Decoder).fastpathDecMapUint32BoolR) - fn(map[uint64]interface{}(nil), (*Encoder).fastpathEncMapUint64IntfR, (*Decoder).fastpathDecMapUint64IntfR) - fn(map[uint64]string(nil), (*Encoder).fastpathEncMapUint64StringR, (*Decoder).fastpathDecMapUint64StringR) - fn(map[uint64]uint(nil), (*Encoder).fastpathEncMapUint64UintR, (*Decoder).fastpathDecMapUint64UintR) - fn(map[uint64]uint8(nil), (*Encoder).fastpathEncMapUint64Uint8R, (*Decoder).fastpathDecMapUint64Uint8R) - fn(map[uint64]uint16(nil), (*Encoder).fastpathEncMapUint64Uint16R, (*Decoder).fastpathDecMapUint64Uint16R) - fn(map[uint64]uint32(nil), (*Encoder).fastpathEncMapUint64Uint32R, (*Decoder).fastpathDecMapUint64Uint32R) - fn(map[uint64]uint64(nil), (*Encoder).fastpathEncMapUint64Uint64R, (*Decoder).fastpathDecMapUint64Uint64R) - fn(map[uint64]uintptr(nil), (*Encoder).fastpathEncMapUint64UintptrR, (*Decoder).fastpathDecMapUint64UintptrR) - fn(map[uint64]int(nil), (*Encoder).fastpathEncMapUint64IntR, (*Decoder).fastpathDecMapUint64IntR) - fn(map[uint64]int8(nil), (*Encoder).fastpathEncMapUint64Int8R, (*Decoder).fastpathDecMapUint64Int8R) - fn(map[uint64]int16(nil), (*Encoder).fastpathEncMapUint64Int16R, 
(*Decoder).fastpathDecMapUint64Int16R) - fn(map[uint64]int32(nil), (*Encoder).fastpathEncMapUint64Int32R, (*Decoder).fastpathDecMapUint64Int32R) - fn(map[uint64]int64(nil), (*Encoder).fastpathEncMapUint64Int64R, (*Decoder).fastpathDecMapUint64Int64R) - fn(map[uint64]float32(nil), (*Encoder).fastpathEncMapUint64Float32R, (*Decoder).fastpathDecMapUint64Float32R) - fn(map[uint64]float64(nil), (*Encoder).fastpathEncMapUint64Float64R, (*Decoder).fastpathDecMapUint64Float64R) - fn(map[uint64]bool(nil), (*Encoder).fastpathEncMapUint64BoolR, (*Decoder).fastpathDecMapUint64BoolR) - fn(map[uintptr]interface{}(nil), (*Encoder).fastpathEncMapUintptrIntfR, (*Decoder).fastpathDecMapUintptrIntfR) - fn(map[uintptr]string(nil), (*Encoder).fastpathEncMapUintptrStringR, (*Decoder).fastpathDecMapUintptrStringR) - fn(map[uintptr]uint(nil), (*Encoder).fastpathEncMapUintptrUintR, (*Decoder).fastpathDecMapUintptrUintR) - fn(map[uintptr]uint8(nil), (*Encoder).fastpathEncMapUintptrUint8R, (*Decoder).fastpathDecMapUintptrUint8R) - fn(map[uintptr]uint16(nil), (*Encoder).fastpathEncMapUintptrUint16R, (*Decoder).fastpathDecMapUintptrUint16R) - fn(map[uintptr]uint32(nil), (*Encoder).fastpathEncMapUintptrUint32R, (*Decoder).fastpathDecMapUintptrUint32R) - fn(map[uintptr]uint64(nil), (*Encoder).fastpathEncMapUintptrUint64R, (*Decoder).fastpathDecMapUintptrUint64R) - fn(map[uintptr]uintptr(nil), (*Encoder).fastpathEncMapUintptrUintptrR, (*Decoder).fastpathDecMapUintptrUintptrR) - fn(map[uintptr]int(nil), (*Encoder).fastpathEncMapUintptrIntR, (*Decoder).fastpathDecMapUintptrIntR) - fn(map[uintptr]int8(nil), (*Encoder).fastpathEncMapUintptrInt8R, (*Decoder).fastpathDecMapUintptrInt8R) - fn(map[uintptr]int16(nil), (*Encoder).fastpathEncMapUintptrInt16R, (*Decoder).fastpathDecMapUintptrInt16R) - fn(map[uintptr]int32(nil), (*Encoder).fastpathEncMapUintptrInt32R, (*Decoder).fastpathDecMapUintptrInt32R) - fn(map[uintptr]int64(nil), (*Encoder).fastpathEncMapUintptrInt64R, (*Decoder).fastpathDecMapUintptrInt64R) - fn(map[uintptr]float32(nil), (*Encoder).fastpathEncMapUintptrFloat32R, (*Decoder).fastpathDecMapUintptrFloat32R) - fn(map[uintptr]float64(nil), (*Encoder).fastpathEncMapUintptrFloat64R, (*Decoder).fastpathDecMapUintptrFloat64R) - fn(map[uintptr]bool(nil), (*Encoder).fastpathEncMapUintptrBoolR, (*Decoder).fastpathDecMapUintptrBoolR) - fn(map[int]interface{}(nil), (*Encoder).fastpathEncMapIntIntfR, (*Decoder).fastpathDecMapIntIntfR) - fn(map[int]string(nil), (*Encoder).fastpathEncMapIntStringR, (*Decoder).fastpathDecMapIntStringR) - fn(map[int]uint(nil), (*Encoder).fastpathEncMapIntUintR, (*Decoder).fastpathDecMapIntUintR) - fn(map[int]uint8(nil), (*Encoder).fastpathEncMapIntUint8R, (*Decoder).fastpathDecMapIntUint8R) - fn(map[int]uint16(nil), (*Encoder).fastpathEncMapIntUint16R, (*Decoder).fastpathDecMapIntUint16R) - fn(map[int]uint32(nil), (*Encoder).fastpathEncMapIntUint32R, (*Decoder).fastpathDecMapIntUint32R) - fn(map[int]uint64(nil), (*Encoder).fastpathEncMapIntUint64R, (*Decoder).fastpathDecMapIntUint64R) - fn(map[int]uintptr(nil), (*Encoder).fastpathEncMapIntUintptrR, (*Decoder).fastpathDecMapIntUintptrR) - fn(map[int]int(nil), (*Encoder).fastpathEncMapIntIntR, (*Decoder).fastpathDecMapIntIntR) - fn(map[int]int8(nil), (*Encoder).fastpathEncMapIntInt8R, (*Decoder).fastpathDecMapIntInt8R) - fn(map[int]int16(nil), (*Encoder).fastpathEncMapIntInt16R, (*Decoder).fastpathDecMapIntInt16R) - fn(map[int]int32(nil), (*Encoder).fastpathEncMapIntInt32R, (*Decoder).fastpathDecMapIntInt32R) - fn(map[int]int64(nil), 
(*Encoder).fastpathEncMapIntInt64R, (*Decoder).fastpathDecMapIntInt64R) - fn(map[int]float32(nil), (*Encoder).fastpathEncMapIntFloat32R, (*Decoder).fastpathDecMapIntFloat32R) - fn(map[int]float64(nil), (*Encoder).fastpathEncMapIntFloat64R, (*Decoder).fastpathDecMapIntFloat64R) - fn(map[int]bool(nil), (*Encoder).fastpathEncMapIntBoolR, (*Decoder).fastpathDecMapIntBoolR) - fn(map[int8]interface{}(nil), (*Encoder).fastpathEncMapInt8IntfR, (*Decoder).fastpathDecMapInt8IntfR) - fn(map[int8]string(nil), (*Encoder).fastpathEncMapInt8StringR, (*Decoder).fastpathDecMapInt8StringR) - fn(map[int8]uint(nil), (*Encoder).fastpathEncMapInt8UintR, (*Decoder).fastpathDecMapInt8UintR) - fn(map[int8]uint8(nil), (*Encoder).fastpathEncMapInt8Uint8R, (*Decoder).fastpathDecMapInt8Uint8R) - fn(map[int8]uint16(nil), (*Encoder).fastpathEncMapInt8Uint16R, (*Decoder).fastpathDecMapInt8Uint16R) - fn(map[int8]uint32(nil), (*Encoder).fastpathEncMapInt8Uint32R, (*Decoder).fastpathDecMapInt8Uint32R) - fn(map[int8]uint64(nil), (*Encoder).fastpathEncMapInt8Uint64R, (*Decoder).fastpathDecMapInt8Uint64R) - fn(map[int8]uintptr(nil), (*Encoder).fastpathEncMapInt8UintptrR, (*Decoder).fastpathDecMapInt8UintptrR) - fn(map[int8]int(nil), (*Encoder).fastpathEncMapInt8IntR, (*Decoder).fastpathDecMapInt8IntR) - fn(map[int8]int8(nil), (*Encoder).fastpathEncMapInt8Int8R, (*Decoder).fastpathDecMapInt8Int8R) - fn(map[int8]int16(nil), (*Encoder).fastpathEncMapInt8Int16R, (*Decoder).fastpathDecMapInt8Int16R) - fn(map[int8]int32(nil), (*Encoder).fastpathEncMapInt8Int32R, (*Decoder).fastpathDecMapInt8Int32R) - fn(map[int8]int64(nil), (*Encoder).fastpathEncMapInt8Int64R, (*Decoder).fastpathDecMapInt8Int64R) - fn(map[int8]float32(nil), (*Encoder).fastpathEncMapInt8Float32R, (*Decoder).fastpathDecMapInt8Float32R) - fn(map[int8]float64(nil), (*Encoder).fastpathEncMapInt8Float64R, (*Decoder).fastpathDecMapInt8Float64R) - fn(map[int8]bool(nil), (*Encoder).fastpathEncMapInt8BoolR, (*Decoder).fastpathDecMapInt8BoolR) - fn(map[int16]interface{}(nil), (*Encoder).fastpathEncMapInt16IntfR, (*Decoder).fastpathDecMapInt16IntfR) - fn(map[int16]string(nil), (*Encoder).fastpathEncMapInt16StringR, (*Decoder).fastpathDecMapInt16StringR) - fn(map[int16]uint(nil), (*Encoder).fastpathEncMapInt16UintR, (*Decoder).fastpathDecMapInt16UintR) - fn(map[int16]uint8(nil), (*Encoder).fastpathEncMapInt16Uint8R, (*Decoder).fastpathDecMapInt16Uint8R) - fn(map[int16]uint16(nil), (*Encoder).fastpathEncMapInt16Uint16R, (*Decoder).fastpathDecMapInt16Uint16R) - fn(map[int16]uint32(nil), (*Encoder).fastpathEncMapInt16Uint32R, (*Decoder).fastpathDecMapInt16Uint32R) - fn(map[int16]uint64(nil), (*Encoder).fastpathEncMapInt16Uint64R, (*Decoder).fastpathDecMapInt16Uint64R) - fn(map[int16]uintptr(nil), (*Encoder).fastpathEncMapInt16UintptrR, (*Decoder).fastpathDecMapInt16UintptrR) - fn(map[int16]int(nil), (*Encoder).fastpathEncMapInt16IntR, (*Decoder).fastpathDecMapInt16IntR) - fn(map[int16]int8(nil), (*Encoder).fastpathEncMapInt16Int8R, (*Decoder).fastpathDecMapInt16Int8R) - fn(map[int16]int16(nil), (*Encoder).fastpathEncMapInt16Int16R, (*Decoder).fastpathDecMapInt16Int16R) - fn(map[int16]int32(nil), (*Encoder).fastpathEncMapInt16Int32R, (*Decoder).fastpathDecMapInt16Int32R) - fn(map[int16]int64(nil), (*Encoder).fastpathEncMapInt16Int64R, (*Decoder).fastpathDecMapInt16Int64R) - fn(map[int16]float32(nil), (*Encoder).fastpathEncMapInt16Float32R, (*Decoder).fastpathDecMapInt16Float32R) - fn(map[int16]float64(nil), (*Encoder).fastpathEncMapInt16Float64R, 
(*Decoder).fastpathDecMapInt16Float64R) - fn(map[int16]bool(nil), (*Encoder).fastpathEncMapInt16BoolR, (*Decoder).fastpathDecMapInt16BoolR) - fn(map[int32]interface{}(nil), (*Encoder).fastpathEncMapInt32IntfR, (*Decoder).fastpathDecMapInt32IntfR) - fn(map[int32]string(nil), (*Encoder).fastpathEncMapInt32StringR, (*Decoder).fastpathDecMapInt32StringR) - fn(map[int32]uint(nil), (*Encoder).fastpathEncMapInt32UintR, (*Decoder).fastpathDecMapInt32UintR) - fn(map[int32]uint8(nil), (*Encoder).fastpathEncMapInt32Uint8R, (*Decoder).fastpathDecMapInt32Uint8R) - fn(map[int32]uint16(nil), (*Encoder).fastpathEncMapInt32Uint16R, (*Decoder).fastpathDecMapInt32Uint16R) - fn(map[int32]uint32(nil), (*Encoder).fastpathEncMapInt32Uint32R, (*Decoder).fastpathDecMapInt32Uint32R) - fn(map[int32]uint64(nil), (*Encoder).fastpathEncMapInt32Uint64R, (*Decoder).fastpathDecMapInt32Uint64R) - fn(map[int32]uintptr(nil), (*Encoder).fastpathEncMapInt32UintptrR, (*Decoder).fastpathDecMapInt32UintptrR) - fn(map[int32]int(nil), (*Encoder).fastpathEncMapInt32IntR, (*Decoder).fastpathDecMapInt32IntR) - fn(map[int32]int8(nil), (*Encoder).fastpathEncMapInt32Int8R, (*Decoder).fastpathDecMapInt32Int8R) - fn(map[int32]int16(nil), (*Encoder).fastpathEncMapInt32Int16R, (*Decoder).fastpathDecMapInt32Int16R) - fn(map[int32]int32(nil), (*Encoder).fastpathEncMapInt32Int32R, (*Decoder).fastpathDecMapInt32Int32R) - fn(map[int32]int64(nil), (*Encoder).fastpathEncMapInt32Int64R, (*Decoder).fastpathDecMapInt32Int64R) - fn(map[int32]float32(nil), (*Encoder).fastpathEncMapInt32Float32R, (*Decoder).fastpathDecMapInt32Float32R) - fn(map[int32]float64(nil), (*Encoder).fastpathEncMapInt32Float64R, (*Decoder).fastpathDecMapInt32Float64R) - fn(map[int32]bool(nil), (*Encoder).fastpathEncMapInt32BoolR, (*Decoder).fastpathDecMapInt32BoolR) - fn(map[int64]interface{}(nil), (*Encoder).fastpathEncMapInt64IntfR, (*Decoder).fastpathDecMapInt64IntfR) - fn(map[int64]string(nil), (*Encoder).fastpathEncMapInt64StringR, (*Decoder).fastpathDecMapInt64StringR) - fn(map[int64]uint(nil), (*Encoder).fastpathEncMapInt64UintR, (*Decoder).fastpathDecMapInt64UintR) - fn(map[int64]uint8(nil), (*Encoder).fastpathEncMapInt64Uint8R, (*Decoder).fastpathDecMapInt64Uint8R) - fn(map[int64]uint16(nil), (*Encoder).fastpathEncMapInt64Uint16R, (*Decoder).fastpathDecMapInt64Uint16R) - fn(map[int64]uint32(nil), (*Encoder).fastpathEncMapInt64Uint32R, (*Decoder).fastpathDecMapInt64Uint32R) - fn(map[int64]uint64(nil), (*Encoder).fastpathEncMapInt64Uint64R, (*Decoder).fastpathDecMapInt64Uint64R) - fn(map[int64]uintptr(nil), (*Encoder).fastpathEncMapInt64UintptrR, (*Decoder).fastpathDecMapInt64UintptrR) - fn(map[int64]int(nil), (*Encoder).fastpathEncMapInt64IntR, (*Decoder).fastpathDecMapInt64IntR) - fn(map[int64]int8(nil), (*Encoder).fastpathEncMapInt64Int8R, (*Decoder).fastpathDecMapInt64Int8R) - fn(map[int64]int16(nil), (*Encoder).fastpathEncMapInt64Int16R, (*Decoder).fastpathDecMapInt64Int16R) - fn(map[int64]int32(nil), (*Encoder).fastpathEncMapInt64Int32R, (*Decoder).fastpathDecMapInt64Int32R) - fn(map[int64]int64(nil), (*Encoder).fastpathEncMapInt64Int64R, (*Decoder).fastpathDecMapInt64Int64R) - fn(map[int64]float32(nil), (*Encoder).fastpathEncMapInt64Float32R, (*Decoder).fastpathDecMapInt64Float32R) - fn(map[int64]float64(nil), (*Encoder).fastpathEncMapInt64Float64R, (*Decoder).fastpathDecMapInt64Float64R) - fn(map[int64]bool(nil), (*Encoder).fastpathEncMapInt64BoolR, (*Decoder).fastpathDecMapInt64BoolR) - fn(map[bool]interface{}(nil), (*Encoder).fastpathEncMapBoolIntfR, 
(*Decoder).fastpathDecMapBoolIntfR) - fn(map[bool]string(nil), (*Encoder).fastpathEncMapBoolStringR, (*Decoder).fastpathDecMapBoolStringR) - fn(map[bool]uint(nil), (*Encoder).fastpathEncMapBoolUintR, (*Decoder).fastpathDecMapBoolUintR) - fn(map[bool]uint8(nil), (*Encoder).fastpathEncMapBoolUint8R, (*Decoder).fastpathDecMapBoolUint8R) - fn(map[bool]uint16(nil), (*Encoder).fastpathEncMapBoolUint16R, (*Decoder).fastpathDecMapBoolUint16R) - fn(map[bool]uint32(nil), (*Encoder).fastpathEncMapBoolUint32R, (*Decoder).fastpathDecMapBoolUint32R) - fn(map[bool]uint64(nil), (*Encoder).fastpathEncMapBoolUint64R, (*Decoder).fastpathDecMapBoolUint64R) - fn(map[bool]uintptr(nil), (*Encoder).fastpathEncMapBoolUintptrR, (*Decoder).fastpathDecMapBoolUintptrR) - fn(map[bool]int(nil), (*Encoder).fastpathEncMapBoolIntR, (*Decoder).fastpathDecMapBoolIntR) - fn(map[bool]int8(nil), (*Encoder).fastpathEncMapBoolInt8R, (*Decoder).fastpathDecMapBoolInt8R) - fn(map[bool]int16(nil), (*Encoder).fastpathEncMapBoolInt16R, (*Decoder).fastpathDecMapBoolInt16R) - fn(map[bool]int32(nil), (*Encoder).fastpathEncMapBoolInt32R, (*Decoder).fastpathDecMapBoolInt32R) - fn(map[bool]int64(nil), (*Encoder).fastpathEncMapBoolInt64R, (*Decoder).fastpathDecMapBoolInt64R) - fn(map[bool]float32(nil), (*Encoder).fastpathEncMapBoolFloat32R, (*Decoder).fastpathDecMapBoolFloat32R) - fn(map[bool]float64(nil), (*Encoder).fastpathEncMapBoolFloat64R, (*Decoder).fastpathDecMapBoolFloat64R) - fn(map[bool]bool(nil), (*Encoder).fastpathEncMapBoolBoolR, (*Decoder).fastpathDecMapBoolBoolR) - - sort.Sort(fastpathAslice(fastpathAV[:])) -} - -// -- encode - -// -- -- fast path type switch -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { - - case []interface{}: - fastpathTV.EncSliceIntfV(v, e) - case *[]interface{}: - fastpathTV.EncSliceIntfV(*v, e) - case []string: - fastpathTV.EncSliceStringV(v, e) - case *[]string: - fastpathTV.EncSliceStringV(*v, e) - case []float32: - fastpathTV.EncSliceFloat32V(v, e) - case *[]float32: - fastpathTV.EncSliceFloat32V(*v, e) - case []float64: - fastpathTV.EncSliceFloat64V(v, e) - case *[]float64: - fastpathTV.EncSliceFloat64V(*v, e) - case []uint: - fastpathTV.EncSliceUintV(v, e) - case *[]uint: - fastpathTV.EncSliceUintV(*v, e) - case []uint16: - fastpathTV.EncSliceUint16V(v, e) - case *[]uint16: - fastpathTV.EncSliceUint16V(*v, e) - case []uint32: - fastpathTV.EncSliceUint32V(v, e) - case *[]uint32: - fastpathTV.EncSliceUint32V(*v, e) - case []uint64: - fastpathTV.EncSliceUint64V(v, e) - case *[]uint64: - fastpathTV.EncSliceUint64V(*v, e) - case []uintptr: - fastpathTV.EncSliceUintptrV(v, e) - case *[]uintptr: - fastpathTV.EncSliceUintptrV(*v, e) - case []int: - fastpathTV.EncSliceIntV(v, e) - case *[]int: - fastpathTV.EncSliceIntV(*v, e) - case []int8: - fastpathTV.EncSliceInt8V(v, e) - case *[]int8: - fastpathTV.EncSliceInt8V(*v, e) - case []int16: - fastpathTV.EncSliceInt16V(v, e) - case *[]int16: - fastpathTV.EncSliceInt16V(*v, e) - case []int32: - fastpathTV.EncSliceInt32V(v, e) - case *[]int32: - fastpathTV.EncSliceInt32V(*v, e) - case []int64: - fastpathTV.EncSliceInt64V(v, e) - case *[]int64: - fastpathTV.EncSliceInt64V(*v, e) - case []bool: - fastpathTV.EncSliceBoolV(v, e) - case *[]bool: - fastpathTV.EncSliceBoolV(*v, e) - - case map[interface{}]interface{}: - fastpathTV.EncMapIntfIntfV(v, e) - case *map[interface{}]interface{}: - fastpathTV.EncMapIntfIntfV(*v, e) - case map[interface{}]string: - fastpathTV.EncMapIntfStringV(v, e) - case *map[interface{}]string: - 
fastpathTV.EncMapIntfStringV(*v, e) - case map[interface{}]uint: - fastpathTV.EncMapIntfUintV(v, e) - case *map[interface{}]uint: - fastpathTV.EncMapIntfUintV(*v, e) - case map[interface{}]uint8: - fastpathTV.EncMapIntfUint8V(v, e) - case *map[interface{}]uint8: - fastpathTV.EncMapIntfUint8V(*v, e) - case map[interface{}]uint16: - fastpathTV.EncMapIntfUint16V(v, e) - case *map[interface{}]uint16: - fastpathTV.EncMapIntfUint16V(*v, e) - case map[interface{}]uint32: - fastpathTV.EncMapIntfUint32V(v, e) - case *map[interface{}]uint32: - fastpathTV.EncMapIntfUint32V(*v, e) - case map[interface{}]uint64: - fastpathTV.EncMapIntfUint64V(v, e) - case *map[interface{}]uint64: - fastpathTV.EncMapIntfUint64V(*v, e) - case map[interface{}]uintptr: - fastpathTV.EncMapIntfUintptrV(v, e) - case *map[interface{}]uintptr: - fastpathTV.EncMapIntfUintptrV(*v, e) - case map[interface{}]int: - fastpathTV.EncMapIntfIntV(v, e) - case *map[interface{}]int: - fastpathTV.EncMapIntfIntV(*v, e) - case map[interface{}]int8: - fastpathTV.EncMapIntfInt8V(v, e) - case *map[interface{}]int8: - fastpathTV.EncMapIntfInt8V(*v, e) - case map[interface{}]int16: - fastpathTV.EncMapIntfInt16V(v, e) - case *map[interface{}]int16: - fastpathTV.EncMapIntfInt16V(*v, e) - case map[interface{}]int32: - fastpathTV.EncMapIntfInt32V(v, e) - case *map[interface{}]int32: - fastpathTV.EncMapIntfInt32V(*v, e) - case map[interface{}]int64: - fastpathTV.EncMapIntfInt64V(v, e) - case *map[interface{}]int64: - fastpathTV.EncMapIntfInt64V(*v, e) - case map[interface{}]float32: - fastpathTV.EncMapIntfFloat32V(v, e) - case *map[interface{}]float32: - fastpathTV.EncMapIntfFloat32V(*v, e) - case map[interface{}]float64: - fastpathTV.EncMapIntfFloat64V(v, e) - case *map[interface{}]float64: - fastpathTV.EncMapIntfFloat64V(*v, e) - case map[interface{}]bool: - fastpathTV.EncMapIntfBoolV(v, e) - case *map[interface{}]bool: - fastpathTV.EncMapIntfBoolV(*v, e) - case map[string]interface{}: - fastpathTV.EncMapStringIntfV(v, e) - case *map[string]interface{}: - fastpathTV.EncMapStringIntfV(*v, e) - case map[string]string: - fastpathTV.EncMapStringStringV(v, e) - case *map[string]string: - fastpathTV.EncMapStringStringV(*v, e) - case map[string]uint: - fastpathTV.EncMapStringUintV(v, e) - case *map[string]uint: - fastpathTV.EncMapStringUintV(*v, e) - case map[string]uint8: - fastpathTV.EncMapStringUint8V(v, e) - case *map[string]uint8: - fastpathTV.EncMapStringUint8V(*v, e) - case map[string]uint16: - fastpathTV.EncMapStringUint16V(v, e) - case *map[string]uint16: - fastpathTV.EncMapStringUint16V(*v, e) - case map[string]uint32: - fastpathTV.EncMapStringUint32V(v, e) - case *map[string]uint32: - fastpathTV.EncMapStringUint32V(*v, e) - case map[string]uint64: - fastpathTV.EncMapStringUint64V(v, e) - case *map[string]uint64: - fastpathTV.EncMapStringUint64V(*v, e) - case map[string]uintptr: - fastpathTV.EncMapStringUintptrV(v, e) - case *map[string]uintptr: - fastpathTV.EncMapStringUintptrV(*v, e) - case map[string]int: - fastpathTV.EncMapStringIntV(v, e) - case *map[string]int: - fastpathTV.EncMapStringIntV(*v, e) - case map[string]int8: - fastpathTV.EncMapStringInt8V(v, e) - case *map[string]int8: - fastpathTV.EncMapStringInt8V(*v, e) - case map[string]int16: - fastpathTV.EncMapStringInt16V(v, e) - case *map[string]int16: - fastpathTV.EncMapStringInt16V(*v, e) - case map[string]int32: - fastpathTV.EncMapStringInt32V(v, e) - case *map[string]int32: - fastpathTV.EncMapStringInt32V(*v, e) - case map[string]int64: - fastpathTV.EncMapStringInt64V(v, e) - case 
*map[string]int64: - fastpathTV.EncMapStringInt64V(*v, e) - case map[string]float32: - fastpathTV.EncMapStringFloat32V(v, e) - case *map[string]float32: - fastpathTV.EncMapStringFloat32V(*v, e) - case map[string]float64: - fastpathTV.EncMapStringFloat64V(v, e) - case *map[string]float64: - fastpathTV.EncMapStringFloat64V(*v, e) - case map[string]bool: - fastpathTV.EncMapStringBoolV(v, e) - case *map[string]bool: - fastpathTV.EncMapStringBoolV(*v, e) - case map[float32]interface{}: - fastpathTV.EncMapFloat32IntfV(v, e) - case *map[float32]interface{}: - fastpathTV.EncMapFloat32IntfV(*v, e) - case map[float32]string: - fastpathTV.EncMapFloat32StringV(v, e) - case *map[float32]string: - fastpathTV.EncMapFloat32StringV(*v, e) - case map[float32]uint: - fastpathTV.EncMapFloat32UintV(v, e) - case *map[float32]uint: - fastpathTV.EncMapFloat32UintV(*v, e) - case map[float32]uint8: - fastpathTV.EncMapFloat32Uint8V(v, e) - case *map[float32]uint8: - fastpathTV.EncMapFloat32Uint8V(*v, e) - case map[float32]uint16: - fastpathTV.EncMapFloat32Uint16V(v, e) - case *map[float32]uint16: - fastpathTV.EncMapFloat32Uint16V(*v, e) - case map[float32]uint32: - fastpathTV.EncMapFloat32Uint32V(v, e) - case *map[float32]uint32: - fastpathTV.EncMapFloat32Uint32V(*v, e) - case map[float32]uint64: - fastpathTV.EncMapFloat32Uint64V(v, e) - case *map[float32]uint64: - fastpathTV.EncMapFloat32Uint64V(*v, e) - case map[float32]uintptr: - fastpathTV.EncMapFloat32UintptrV(v, e) - case *map[float32]uintptr: - fastpathTV.EncMapFloat32UintptrV(*v, e) - case map[float32]int: - fastpathTV.EncMapFloat32IntV(v, e) - case *map[float32]int: - fastpathTV.EncMapFloat32IntV(*v, e) - case map[float32]int8: - fastpathTV.EncMapFloat32Int8V(v, e) - case *map[float32]int8: - fastpathTV.EncMapFloat32Int8V(*v, e) - case map[float32]int16: - fastpathTV.EncMapFloat32Int16V(v, e) - case *map[float32]int16: - fastpathTV.EncMapFloat32Int16V(*v, e) - case map[float32]int32: - fastpathTV.EncMapFloat32Int32V(v, e) - case *map[float32]int32: - fastpathTV.EncMapFloat32Int32V(*v, e) - case map[float32]int64: - fastpathTV.EncMapFloat32Int64V(v, e) - case *map[float32]int64: - fastpathTV.EncMapFloat32Int64V(*v, e) - case map[float32]float32: - fastpathTV.EncMapFloat32Float32V(v, e) - case *map[float32]float32: - fastpathTV.EncMapFloat32Float32V(*v, e) - case map[float32]float64: - fastpathTV.EncMapFloat32Float64V(v, e) - case *map[float32]float64: - fastpathTV.EncMapFloat32Float64V(*v, e) - case map[float32]bool: - fastpathTV.EncMapFloat32BoolV(v, e) - case *map[float32]bool: - fastpathTV.EncMapFloat32BoolV(*v, e) - case map[float64]interface{}: - fastpathTV.EncMapFloat64IntfV(v, e) - case *map[float64]interface{}: - fastpathTV.EncMapFloat64IntfV(*v, e) - case map[float64]string: - fastpathTV.EncMapFloat64StringV(v, e) - case *map[float64]string: - fastpathTV.EncMapFloat64StringV(*v, e) - case map[float64]uint: - fastpathTV.EncMapFloat64UintV(v, e) - case *map[float64]uint: - fastpathTV.EncMapFloat64UintV(*v, e) - case map[float64]uint8: - fastpathTV.EncMapFloat64Uint8V(v, e) - case *map[float64]uint8: - fastpathTV.EncMapFloat64Uint8V(*v, e) - case map[float64]uint16: - fastpathTV.EncMapFloat64Uint16V(v, e) - case *map[float64]uint16: - fastpathTV.EncMapFloat64Uint16V(*v, e) - case map[float64]uint32: - fastpathTV.EncMapFloat64Uint32V(v, e) - case *map[float64]uint32: - fastpathTV.EncMapFloat64Uint32V(*v, e) - case map[float64]uint64: - fastpathTV.EncMapFloat64Uint64V(v, e) - case *map[float64]uint64: - fastpathTV.EncMapFloat64Uint64V(*v, e) - case 
map[float64]uintptr: - fastpathTV.EncMapFloat64UintptrV(v, e) - case *map[float64]uintptr: - fastpathTV.EncMapFloat64UintptrV(*v, e) - case map[float64]int: - fastpathTV.EncMapFloat64IntV(v, e) - case *map[float64]int: - fastpathTV.EncMapFloat64IntV(*v, e) - case map[float64]int8: - fastpathTV.EncMapFloat64Int8V(v, e) - case *map[float64]int8: - fastpathTV.EncMapFloat64Int8V(*v, e) - case map[float64]int16: - fastpathTV.EncMapFloat64Int16V(v, e) - case *map[float64]int16: - fastpathTV.EncMapFloat64Int16V(*v, e) - case map[float64]int32: - fastpathTV.EncMapFloat64Int32V(v, e) - case *map[float64]int32: - fastpathTV.EncMapFloat64Int32V(*v, e) - case map[float64]int64: - fastpathTV.EncMapFloat64Int64V(v, e) - case *map[float64]int64: - fastpathTV.EncMapFloat64Int64V(*v, e) - case map[float64]float32: - fastpathTV.EncMapFloat64Float32V(v, e) - case *map[float64]float32: - fastpathTV.EncMapFloat64Float32V(*v, e) - case map[float64]float64: - fastpathTV.EncMapFloat64Float64V(v, e) - case *map[float64]float64: - fastpathTV.EncMapFloat64Float64V(*v, e) - case map[float64]bool: - fastpathTV.EncMapFloat64BoolV(v, e) - case *map[float64]bool: - fastpathTV.EncMapFloat64BoolV(*v, e) - case map[uint]interface{}: - fastpathTV.EncMapUintIntfV(v, e) - case *map[uint]interface{}: - fastpathTV.EncMapUintIntfV(*v, e) - case map[uint]string: - fastpathTV.EncMapUintStringV(v, e) - case *map[uint]string: - fastpathTV.EncMapUintStringV(*v, e) - case map[uint]uint: - fastpathTV.EncMapUintUintV(v, e) - case *map[uint]uint: - fastpathTV.EncMapUintUintV(*v, e) - case map[uint]uint8: - fastpathTV.EncMapUintUint8V(v, e) - case *map[uint]uint8: - fastpathTV.EncMapUintUint8V(*v, e) - case map[uint]uint16: - fastpathTV.EncMapUintUint16V(v, e) - case *map[uint]uint16: - fastpathTV.EncMapUintUint16V(*v, e) - case map[uint]uint32: - fastpathTV.EncMapUintUint32V(v, e) - case *map[uint]uint32: - fastpathTV.EncMapUintUint32V(*v, e) - case map[uint]uint64: - fastpathTV.EncMapUintUint64V(v, e) - case *map[uint]uint64: - fastpathTV.EncMapUintUint64V(*v, e) - case map[uint]uintptr: - fastpathTV.EncMapUintUintptrV(v, e) - case *map[uint]uintptr: - fastpathTV.EncMapUintUintptrV(*v, e) - case map[uint]int: - fastpathTV.EncMapUintIntV(v, e) - case *map[uint]int: - fastpathTV.EncMapUintIntV(*v, e) - case map[uint]int8: - fastpathTV.EncMapUintInt8V(v, e) - case *map[uint]int8: - fastpathTV.EncMapUintInt8V(*v, e) - case map[uint]int16: - fastpathTV.EncMapUintInt16V(v, e) - case *map[uint]int16: - fastpathTV.EncMapUintInt16V(*v, e) - case map[uint]int32: - fastpathTV.EncMapUintInt32V(v, e) - case *map[uint]int32: - fastpathTV.EncMapUintInt32V(*v, e) - case map[uint]int64: - fastpathTV.EncMapUintInt64V(v, e) - case *map[uint]int64: - fastpathTV.EncMapUintInt64V(*v, e) - case map[uint]float32: - fastpathTV.EncMapUintFloat32V(v, e) - case *map[uint]float32: - fastpathTV.EncMapUintFloat32V(*v, e) - case map[uint]float64: - fastpathTV.EncMapUintFloat64V(v, e) - case *map[uint]float64: - fastpathTV.EncMapUintFloat64V(*v, e) - case map[uint]bool: - fastpathTV.EncMapUintBoolV(v, e) - case *map[uint]bool: - fastpathTV.EncMapUintBoolV(*v, e) - case map[uint8]interface{}: - fastpathTV.EncMapUint8IntfV(v, e) - case *map[uint8]interface{}: - fastpathTV.EncMapUint8IntfV(*v, e) - case map[uint8]string: - fastpathTV.EncMapUint8StringV(v, e) - case *map[uint8]string: - fastpathTV.EncMapUint8StringV(*v, e) - case map[uint8]uint: - fastpathTV.EncMapUint8UintV(v, e) - case *map[uint8]uint: - fastpathTV.EncMapUint8UintV(*v, e) - case map[uint8]uint8: - 
fastpathTV.EncMapUint8Uint8V(v, e) - case *map[uint8]uint8: - fastpathTV.EncMapUint8Uint8V(*v, e) - case map[uint8]uint16: - fastpathTV.EncMapUint8Uint16V(v, e) - case *map[uint8]uint16: - fastpathTV.EncMapUint8Uint16V(*v, e) - case map[uint8]uint32: - fastpathTV.EncMapUint8Uint32V(v, e) - case *map[uint8]uint32: - fastpathTV.EncMapUint8Uint32V(*v, e) - case map[uint8]uint64: - fastpathTV.EncMapUint8Uint64V(v, e) - case *map[uint8]uint64: - fastpathTV.EncMapUint8Uint64V(*v, e) - case map[uint8]uintptr: - fastpathTV.EncMapUint8UintptrV(v, e) - case *map[uint8]uintptr: - fastpathTV.EncMapUint8UintptrV(*v, e) - case map[uint8]int: - fastpathTV.EncMapUint8IntV(v, e) - case *map[uint8]int: - fastpathTV.EncMapUint8IntV(*v, e) - case map[uint8]int8: - fastpathTV.EncMapUint8Int8V(v, e) - case *map[uint8]int8: - fastpathTV.EncMapUint8Int8V(*v, e) - case map[uint8]int16: - fastpathTV.EncMapUint8Int16V(v, e) - case *map[uint8]int16: - fastpathTV.EncMapUint8Int16V(*v, e) - case map[uint8]int32: - fastpathTV.EncMapUint8Int32V(v, e) - case *map[uint8]int32: - fastpathTV.EncMapUint8Int32V(*v, e) - case map[uint8]int64: - fastpathTV.EncMapUint8Int64V(v, e) - case *map[uint8]int64: - fastpathTV.EncMapUint8Int64V(*v, e) - case map[uint8]float32: - fastpathTV.EncMapUint8Float32V(v, e) - case *map[uint8]float32: - fastpathTV.EncMapUint8Float32V(*v, e) - case map[uint8]float64: - fastpathTV.EncMapUint8Float64V(v, e) - case *map[uint8]float64: - fastpathTV.EncMapUint8Float64V(*v, e) - case map[uint8]bool: - fastpathTV.EncMapUint8BoolV(v, e) - case *map[uint8]bool: - fastpathTV.EncMapUint8BoolV(*v, e) - case map[uint16]interface{}: - fastpathTV.EncMapUint16IntfV(v, e) - case *map[uint16]interface{}: - fastpathTV.EncMapUint16IntfV(*v, e) - case map[uint16]string: - fastpathTV.EncMapUint16StringV(v, e) - case *map[uint16]string: - fastpathTV.EncMapUint16StringV(*v, e) - case map[uint16]uint: - fastpathTV.EncMapUint16UintV(v, e) - case *map[uint16]uint: - fastpathTV.EncMapUint16UintV(*v, e) - case map[uint16]uint8: - fastpathTV.EncMapUint16Uint8V(v, e) - case *map[uint16]uint8: - fastpathTV.EncMapUint16Uint8V(*v, e) - case map[uint16]uint16: - fastpathTV.EncMapUint16Uint16V(v, e) - case *map[uint16]uint16: - fastpathTV.EncMapUint16Uint16V(*v, e) - case map[uint16]uint32: - fastpathTV.EncMapUint16Uint32V(v, e) - case *map[uint16]uint32: - fastpathTV.EncMapUint16Uint32V(*v, e) - case map[uint16]uint64: - fastpathTV.EncMapUint16Uint64V(v, e) - case *map[uint16]uint64: - fastpathTV.EncMapUint16Uint64V(*v, e) - case map[uint16]uintptr: - fastpathTV.EncMapUint16UintptrV(v, e) - case *map[uint16]uintptr: - fastpathTV.EncMapUint16UintptrV(*v, e) - case map[uint16]int: - fastpathTV.EncMapUint16IntV(v, e) - case *map[uint16]int: - fastpathTV.EncMapUint16IntV(*v, e) - case map[uint16]int8: - fastpathTV.EncMapUint16Int8V(v, e) - case *map[uint16]int8: - fastpathTV.EncMapUint16Int8V(*v, e) - case map[uint16]int16: - fastpathTV.EncMapUint16Int16V(v, e) - case *map[uint16]int16: - fastpathTV.EncMapUint16Int16V(*v, e) - case map[uint16]int32: - fastpathTV.EncMapUint16Int32V(v, e) - case *map[uint16]int32: - fastpathTV.EncMapUint16Int32V(*v, e) - case map[uint16]int64: - fastpathTV.EncMapUint16Int64V(v, e) - case *map[uint16]int64: - fastpathTV.EncMapUint16Int64V(*v, e) - case map[uint16]float32: - fastpathTV.EncMapUint16Float32V(v, e) - case *map[uint16]float32: - fastpathTV.EncMapUint16Float32V(*v, e) - case map[uint16]float64: - fastpathTV.EncMapUint16Float64V(v, e) - case *map[uint16]float64: - 
fastpathTV.EncMapUint16Float64V(*v, e) - case map[uint16]bool: - fastpathTV.EncMapUint16BoolV(v, e) - case *map[uint16]bool: - fastpathTV.EncMapUint16BoolV(*v, e) - case map[uint32]interface{}: - fastpathTV.EncMapUint32IntfV(v, e) - case *map[uint32]interface{}: - fastpathTV.EncMapUint32IntfV(*v, e) - case map[uint32]string: - fastpathTV.EncMapUint32StringV(v, e) - case *map[uint32]string: - fastpathTV.EncMapUint32StringV(*v, e) - case map[uint32]uint: - fastpathTV.EncMapUint32UintV(v, e) - case *map[uint32]uint: - fastpathTV.EncMapUint32UintV(*v, e) - case map[uint32]uint8: - fastpathTV.EncMapUint32Uint8V(v, e) - case *map[uint32]uint8: - fastpathTV.EncMapUint32Uint8V(*v, e) - case map[uint32]uint16: - fastpathTV.EncMapUint32Uint16V(v, e) - case *map[uint32]uint16: - fastpathTV.EncMapUint32Uint16V(*v, e) - case map[uint32]uint32: - fastpathTV.EncMapUint32Uint32V(v, e) - case *map[uint32]uint32: - fastpathTV.EncMapUint32Uint32V(*v, e) - case map[uint32]uint64: - fastpathTV.EncMapUint32Uint64V(v, e) - case *map[uint32]uint64: - fastpathTV.EncMapUint32Uint64V(*v, e) - case map[uint32]uintptr: - fastpathTV.EncMapUint32UintptrV(v, e) - case *map[uint32]uintptr: - fastpathTV.EncMapUint32UintptrV(*v, e) - case map[uint32]int: - fastpathTV.EncMapUint32IntV(v, e) - case *map[uint32]int: - fastpathTV.EncMapUint32IntV(*v, e) - case map[uint32]int8: - fastpathTV.EncMapUint32Int8V(v, e) - case *map[uint32]int8: - fastpathTV.EncMapUint32Int8V(*v, e) - case map[uint32]int16: - fastpathTV.EncMapUint32Int16V(v, e) - case *map[uint32]int16: - fastpathTV.EncMapUint32Int16V(*v, e) - case map[uint32]int32: - fastpathTV.EncMapUint32Int32V(v, e) - case *map[uint32]int32: - fastpathTV.EncMapUint32Int32V(*v, e) - case map[uint32]int64: - fastpathTV.EncMapUint32Int64V(v, e) - case *map[uint32]int64: - fastpathTV.EncMapUint32Int64V(*v, e) - case map[uint32]float32: - fastpathTV.EncMapUint32Float32V(v, e) - case *map[uint32]float32: - fastpathTV.EncMapUint32Float32V(*v, e) - case map[uint32]float64: - fastpathTV.EncMapUint32Float64V(v, e) - case *map[uint32]float64: - fastpathTV.EncMapUint32Float64V(*v, e) - case map[uint32]bool: - fastpathTV.EncMapUint32BoolV(v, e) - case *map[uint32]bool: - fastpathTV.EncMapUint32BoolV(*v, e) - case map[uint64]interface{}: - fastpathTV.EncMapUint64IntfV(v, e) - case *map[uint64]interface{}: - fastpathTV.EncMapUint64IntfV(*v, e) - case map[uint64]string: - fastpathTV.EncMapUint64StringV(v, e) - case *map[uint64]string: - fastpathTV.EncMapUint64StringV(*v, e) - case map[uint64]uint: - fastpathTV.EncMapUint64UintV(v, e) - case *map[uint64]uint: - fastpathTV.EncMapUint64UintV(*v, e) - case map[uint64]uint8: - fastpathTV.EncMapUint64Uint8V(v, e) - case *map[uint64]uint8: - fastpathTV.EncMapUint64Uint8V(*v, e) - case map[uint64]uint16: - fastpathTV.EncMapUint64Uint16V(v, e) - case *map[uint64]uint16: - fastpathTV.EncMapUint64Uint16V(*v, e) - case map[uint64]uint32: - fastpathTV.EncMapUint64Uint32V(v, e) - case *map[uint64]uint32: - fastpathTV.EncMapUint64Uint32V(*v, e) - case map[uint64]uint64: - fastpathTV.EncMapUint64Uint64V(v, e) - case *map[uint64]uint64: - fastpathTV.EncMapUint64Uint64V(*v, e) - case map[uint64]uintptr: - fastpathTV.EncMapUint64UintptrV(v, e) - case *map[uint64]uintptr: - fastpathTV.EncMapUint64UintptrV(*v, e) - case map[uint64]int: - fastpathTV.EncMapUint64IntV(v, e) - case *map[uint64]int: - fastpathTV.EncMapUint64IntV(*v, e) - case map[uint64]int8: - fastpathTV.EncMapUint64Int8V(v, e) - case *map[uint64]int8: - fastpathTV.EncMapUint64Int8V(*v, e) - case 
map[uint64]int16: - fastpathTV.EncMapUint64Int16V(v, e) - case *map[uint64]int16: - fastpathTV.EncMapUint64Int16V(*v, e) - case map[uint64]int32: - fastpathTV.EncMapUint64Int32V(v, e) - case *map[uint64]int32: - fastpathTV.EncMapUint64Int32V(*v, e) - case map[uint64]int64: - fastpathTV.EncMapUint64Int64V(v, e) - case *map[uint64]int64: - fastpathTV.EncMapUint64Int64V(*v, e) - case map[uint64]float32: - fastpathTV.EncMapUint64Float32V(v, e) - case *map[uint64]float32: - fastpathTV.EncMapUint64Float32V(*v, e) - case map[uint64]float64: - fastpathTV.EncMapUint64Float64V(v, e) - case *map[uint64]float64: - fastpathTV.EncMapUint64Float64V(*v, e) - case map[uint64]bool: - fastpathTV.EncMapUint64BoolV(v, e) - case *map[uint64]bool: - fastpathTV.EncMapUint64BoolV(*v, e) - case map[uintptr]interface{}: - fastpathTV.EncMapUintptrIntfV(v, e) - case *map[uintptr]interface{}: - fastpathTV.EncMapUintptrIntfV(*v, e) - case map[uintptr]string: - fastpathTV.EncMapUintptrStringV(v, e) - case *map[uintptr]string: - fastpathTV.EncMapUintptrStringV(*v, e) - case map[uintptr]uint: - fastpathTV.EncMapUintptrUintV(v, e) - case *map[uintptr]uint: - fastpathTV.EncMapUintptrUintV(*v, e) - case map[uintptr]uint8: - fastpathTV.EncMapUintptrUint8V(v, e) - case *map[uintptr]uint8: - fastpathTV.EncMapUintptrUint8V(*v, e) - case map[uintptr]uint16: - fastpathTV.EncMapUintptrUint16V(v, e) - case *map[uintptr]uint16: - fastpathTV.EncMapUintptrUint16V(*v, e) - case map[uintptr]uint32: - fastpathTV.EncMapUintptrUint32V(v, e) - case *map[uintptr]uint32: - fastpathTV.EncMapUintptrUint32V(*v, e) - case map[uintptr]uint64: - fastpathTV.EncMapUintptrUint64V(v, e) - case *map[uintptr]uint64: - fastpathTV.EncMapUintptrUint64V(*v, e) - case map[uintptr]uintptr: - fastpathTV.EncMapUintptrUintptrV(v, e) - case *map[uintptr]uintptr: - fastpathTV.EncMapUintptrUintptrV(*v, e) - case map[uintptr]int: - fastpathTV.EncMapUintptrIntV(v, e) - case *map[uintptr]int: - fastpathTV.EncMapUintptrIntV(*v, e) - case map[uintptr]int8: - fastpathTV.EncMapUintptrInt8V(v, e) - case *map[uintptr]int8: - fastpathTV.EncMapUintptrInt8V(*v, e) - case map[uintptr]int16: - fastpathTV.EncMapUintptrInt16V(v, e) - case *map[uintptr]int16: - fastpathTV.EncMapUintptrInt16V(*v, e) - case map[uintptr]int32: - fastpathTV.EncMapUintptrInt32V(v, e) - case *map[uintptr]int32: - fastpathTV.EncMapUintptrInt32V(*v, e) - case map[uintptr]int64: - fastpathTV.EncMapUintptrInt64V(v, e) - case *map[uintptr]int64: - fastpathTV.EncMapUintptrInt64V(*v, e) - case map[uintptr]float32: - fastpathTV.EncMapUintptrFloat32V(v, e) - case *map[uintptr]float32: - fastpathTV.EncMapUintptrFloat32V(*v, e) - case map[uintptr]float64: - fastpathTV.EncMapUintptrFloat64V(v, e) - case *map[uintptr]float64: - fastpathTV.EncMapUintptrFloat64V(*v, e) - case map[uintptr]bool: - fastpathTV.EncMapUintptrBoolV(v, e) - case *map[uintptr]bool: - fastpathTV.EncMapUintptrBoolV(*v, e) - case map[int]interface{}: - fastpathTV.EncMapIntIntfV(v, e) - case *map[int]interface{}: - fastpathTV.EncMapIntIntfV(*v, e) - case map[int]string: - fastpathTV.EncMapIntStringV(v, e) - case *map[int]string: - fastpathTV.EncMapIntStringV(*v, e) - case map[int]uint: - fastpathTV.EncMapIntUintV(v, e) - case *map[int]uint: - fastpathTV.EncMapIntUintV(*v, e) - case map[int]uint8: - fastpathTV.EncMapIntUint8V(v, e) - case *map[int]uint8: - fastpathTV.EncMapIntUint8V(*v, e) - case map[int]uint16: - fastpathTV.EncMapIntUint16V(v, e) - case *map[int]uint16: - fastpathTV.EncMapIntUint16V(*v, e) - case map[int]uint32: - 
fastpathTV.EncMapIntUint32V(v, e) - case *map[int]uint32: - fastpathTV.EncMapIntUint32V(*v, e) - case map[int]uint64: - fastpathTV.EncMapIntUint64V(v, e) - case *map[int]uint64: - fastpathTV.EncMapIntUint64V(*v, e) - case map[int]uintptr: - fastpathTV.EncMapIntUintptrV(v, e) - case *map[int]uintptr: - fastpathTV.EncMapIntUintptrV(*v, e) - case map[int]int: - fastpathTV.EncMapIntIntV(v, e) - case *map[int]int: - fastpathTV.EncMapIntIntV(*v, e) - case map[int]int8: - fastpathTV.EncMapIntInt8V(v, e) - case *map[int]int8: - fastpathTV.EncMapIntInt8V(*v, e) - case map[int]int16: - fastpathTV.EncMapIntInt16V(v, e) - case *map[int]int16: - fastpathTV.EncMapIntInt16V(*v, e) - case map[int]int32: - fastpathTV.EncMapIntInt32V(v, e) - case *map[int]int32: - fastpathTV.EncMapIntInt32V(*v, e) - case map[int]int64: - fastpathTV.EncMapIntInt64V(v, e) - case *map[int]int64: - fastpathTV.EncMapIntInt64V(*v, e) - case map[int]float32: - fastpathTV.EncMapIntFloat32V(v, e) - case *map[int]float32: - fastpathTV.EncMapIntFloat32V(*v, e) - case map[int]float64: - fastpathTV.EncMapIntFloat64V(v, e) - case *map[int]float64: - fastpathTV.EncMapIntFloat64V(*v, e) - case map[int]bool: - fastpathTV.EncMapIntBoolV(v, e) - case *map[int]bool: - fastpathTV.EncMapIntBoolV(*v, e) - case map[int8]interface{}: - fastpathTV.EncMapInt8IntfV(v, e) - case *map[int8]interface{}: - fastpathTV.EncMapInt8IntfV(*v, e) - case map[int8]string: - fastpathTV.EncMapInt8StringV(v, e) - case *map[int8]string: - fastpathTV.EncMapInt8StringV(*v, e) - case map[int8]uint: - fastpathTV.EncMapInt8UintV(v, e) - case *map[int8]uint: - fastpathTV.EncMapInt8UintV(*v, e) - case map[int8]uint8: - fastpathTV.EncMapInt8Uint8V(v, e) - case *map[int8]uint8: - fastpathTV.EncMapInt8Uint8V(*v, e) - case map[int8]uint16: - fastpathTV.EncMapInt8Uint16V(v, e) - case *map[int8]uint16: - fastpathTV.EncMapInt8Uint16V(*v, e) - case map[int8]uint32: - fastpathTV.EncMapInt8Uint32V(v, e) - case *map[int8]uint32: - fastpathTV.EncMapInt8Uint32V(*v, e) - case map[int8]uint64: - fastpathTV.EncMapInt8Uint64V(v, e) - case *map[int8]uint64: - fastpathTV.EncMapInt8Uint64V(*v, e) - case map[int8]uintptr: - fastpathTV.EncMapInt8UintptrV(v, e) - case *map[int8]uintptr: - fastpathTV.EncMapInt8UintptrV(*v, e) - case map[int8]int: - fastpathTV.EncMapInt8IntV(v, e) - case *map[int8]int: - fastpathTV.EncMapInt8IntV(*v, e) - case map[int8]int8: - fastpathTV.EncMapInt8Int8V(v, e) - case *map[int8]int8: - fastpathTV.EncMapInt8Int8V(*v, e) - case map[int8]int16: - fastpathTV.EncMapInt8Int16V(v, e) - case *map[int8]int16: - fastpathTV.EncMapInt8Int16V(*v, e) - case map[int8]int32: - fastpathTV.EncMapInt8Int32V(v, e) - case *map[int8]int32: - fastpathTV.EncMapInt8Int32V(*v, e) - case map[int8]int64: - fastpathTV.EncMapInt8Int64V(v, e) - case *map[int8]int64: - fastpathTV.EncMapInt8Int64V(*v, e) - case map[int8]float32: - fastpathTV.EncMapInt8Float32V(v, e) - case *map[int8]float32: - fastpathTV.EncMapInt8Float32V(*v, e) - case map[int8]float64: - fastpathTV.EncMapInt8Float64V(v, e) - case *map[int8]float64: - fastpathTV.EncMapInt8Float64V(*v, e) - case map[int8]bool: - fastpathTV.EncMapInt8BoolV(v, e) - case *map[int8]bool: - fastpathTV.EncMapInt8BoolV(*v, e) - case map[int16]interface{}: - fastpathTV.EncMapInt16IntfV(v, e) - case *map[int16]interface{}: - fastpathTV.EncMapInt16IntfV(*v, e) - case map[int16]string: - fastpathTV.EncMapInt16StringV(v, e) - case *map[int16]string: - fastpathTV.EncMapInt16StringV(*v, e) - case map[int16]uint: - fastpathTV.EncMapInt16UintV(v, e) - case 
*map[int16]uint: - fastpathTV.EncMapInt16UintV(*v, e) - case map[int16]uint8: - fastpathTV.EncMapInt16Uint8V(v, e) - case *map[int16]uint8: - fastpathTV.EncMapInt16Uint8V(*v, e) - case map[int16]uint16: - fastpathTV.EncMapInt16Uint16V(v, e) - case *map[int16]uint16: - fastpathTV.EncMapInt16Uint16V(*v, e) - case map[int16]uint32: - fastpathTV.EncMapInt16Uint32V(v, e) - case *map[int16]uint32: - fastpathTV.EncMapInt16Uint32V(*v, e) - case map[int16]uint64: - fastpathTV.EncMapInt16Uint64V(v, e) - case *map[int16]uint64: - fastpathTV.EncMapInt16Uint64V(*v, e) - case map[int16]uintptr: - fastpathTV.EncMapInt16UintptrV(v, e) - case *map[int16]uintptr: - fastpathTV.EncMapInt16UintptrV(*v, e) - case map[int16]int: - fastpathTV.EncMapInt16IntV(v, e) - case *map[int16]int: - fastpathTV.EncMapInt16IntV(*v, e) - case map[int16]int8: - fastpathTV.EncMapInt16Int8V(v, e) - case *map[int16]int8: - fastpathTV.EncMapInt16Int8V(*v, e) - case map[int16]int16: - fastpathTV.EncMapInt16Int16V(v, e) - case *map[int16]int16: - fastpathTV.EncMapInt16Int16V(*v, e) - case map[int16]int32: - fastpathTV.EncMapInt16Int32V(v, e) - case *map[int16]int32: - fastpathTV.EncMapInt16Int32V(*v, e) - case map[int16]int64: - fastpathTV.EncMapInt16Int64V(v, e) - case *map[int16]int64: - fastpathTV.EncMapInt16Int64V(*v, e) - case map[int16]float32: - fastpathTV.EncMapInt16Float32V(v, e) - case *map[int16]float32: - fastpathTV.EncMapInt16Float32V(*v, e) - case map[int16]float64: - fastpathTV.EncMapInt16Float64V(v, e) - case *map[int16]float64: - fastpathTV.EncMapInt16Float64V(*v, e) - case map[int16]bool: - fastpathTV.EncMapInt16BoolV(v, e) - case *map[int16]bool: - fastpathTV.EncMapInt16BoolV(*v, e) - case map[int32]interface{}: - fastpathTV.EncMapInt32IntfV(v, e) - case *map[int32]interface{}: - fastpathTV.EncMapInt32IntfV(*v, e) - case map[int32]string: - fastpathTV.EncMapInt32StringV(v, e) - case *map[int32]string: - fastpathTV.EncMapInt32StringV(*v, e) - case map[int32]uint: - fastpathTV.EncMapInt32UintV(v, e) - case *map[int32]uint: - fastpathTV.EncMapInt32UintV(*v, e) - case map[int32]uint8: - fastpathTV.EncMapInt32Uint8V(v, e) - case *map[int32]uint8: - fastpathTV.EncMapInt32Uint8V(*v, e) - case map[int32]uint16: - fastpathTV.EncMapInt32Uint16V(v, e) - case *map[int32]uint16: - fastpathTV.EncMapInt32Uint16V(*v, e) - case map[int32]uint32: - fastpathTV.EncMapInt32Uint32V(v, e) - case *map[int32]uint32: - fastpathTV.EncMapInt32Uint32V(*v, e) - case map[int32]uint64: - fastpathTV.EncMapInt32Uint64V(v, e) - case *map[int32]uint64: - fastpathTV.EncMapInt32Uint64V(*v, e) - case map[int32]uintptr: - fastpathTV.EncMapInt32UintptrV(v, e) - case *map[int32]uintptr: - fastpathTV.EncMapInt32UintptrV(*v, e) - case map[int32]int: - fastpathTV.EncMapInt32IntV(v, e) - case *map[int32]int: - fastpathTV.EncMapInt32IntV(*v, e) - case map[int32]int8: - fastpathTV.EncMapInt32Int8V(v, e) - case *map[int32]int8: - fastpathTV.EncMapInt32Int8V(*v, e) - case map[int32]int16: - fastpathTV.EncMapInt32Int16V(v, e) - case *map[int32]int16: - fastpathTV.EncMapInt32Int16V(*v, e) - case map[int32]int32: - fastpathTV.EncMapInt32Int32V(v, e) - case *map[int32]int32: - fastpathTV.EncMapInt32Int32V(*v, e) - case map[int32]int64: - fastpathTV.EncMapInt32Int64V(v, e) - case *map[int32]int64: - fastpathTV.EncMapInt32Int64V(*v, e) - case map[int32]float32: - fastpathTV.EncMapInt32Float32V(v, e) - case *map[int32]float32: - fastpathTV.EncMapInt32Float32V(*v, e) - case map[int32]float64: - fastpathTV.EncMapInt32Float64V(v, e) - case *map[int32]float64: - 
fastpathTV.EncMapInt32Float64V(*v, e) - case map[int32]bool: - fastpathTV.EncMapInt32BoolV(v, e) - case *map[int32]bool: - fastpathTV.EncMapInt32BoolV(*v, e) - case map[int64]interface{}: - fastpathTV.EncMapInt64IntfV(v, e) - case *map[int64]interface{}: - fastpathTV.EncMapInt64IntfV(*v, e) - case map[int64]string: - fastpathTV.EncMapInt64StringV(v, e) - case *map[int64]string: - fastpathTV.EncMapInt64StringV(*v, e) - case map[int64]uint: - fastpathTV.EncMapInt64UintV(v, e) - case *map[int64]uint: - fastpathTV.EncMapInt64UintV(*v, e) - case map[int64]uint8: - fastpathTV.EncMapInt64Uint8V(v, e) - case *map[int64]uint8: - fastpathTV.EncMapInt64Uint8V(*v, e) - case map[int64]uint16: - fastpathTV.EncMapInt64Uint16V(v, e) - case *map[int64]uint16: - fastpathTV.EncMapInt64Uint16V(*v, e) - case map[int64]uint32: - fastpathTV.EncMapInt64Uint32V(v, e) - case *map[int64]uint32: - fastpathTV.EncMapInt64Uint32V(*v, e) - case map[int64]uint64: - fastpathTV.EncMapInt64Uint64V(v, e) - case *map[int64]uint64: - fastpathTV.EncMapInt64Uint64V(*v, e) - case map[int64]uintptr: - fastpathTV.EncMapInt64UintptrV(v, e) - case *map[int64]uintptr: - fastpathTV.EncMapInt64UintptrV(*v, e) - case map[int64]int: - fastpathTV.EncMapInt64IntV(v, e) - case *map[int64]int: - fastpathTV.EncMapInt64IntV(*v, e) - case map[int64]int8: - fastpathTV.EncMapInt64Int8V(v, e) - case *map[int64]int8: - fastpathTV.EncMapInt64Int8V(*v, e) - case map[int64]int16: - fastpathTV.EncMapInt64Int16V(v, e) - case *map[int64]int16: - fastpathTV.EncMapInt64Int16V(*v, e) - case map[int64]int32: - fastpathTV.EncMapInt64Int32V(v, e) - case *map[int64]int32: - fastpathTV.EncMapInt64Int32V(*v, e) - case map[int64]int64: - fastpathTV.EncMapInt64Int64V(v, e) - case *map[int64]int64: - fastpathTV.EncMapInt64Int64V(*v, e) - case map[int64]float32: - fastpathTV.EncMapInt64Float32V(v, e) - case *map[int64]float32: - fastpathTV.EncMapInt64Float32V(*v, e) - case map[int64]float64: - fastpathTV.EncMapInt64Float64V(v, e) - case *map[int64]float64: - fastpathTV.EncMapInt64Float64V(*v, e) - case map[int64]bool: - fastpathTV.EncMapInt64BoolV(v, e) - case *map[int64]bool: - fastpathTV.EncMapInt64BoolV(*v, e) - case map[bool]interface{}: - fastpathTV.EncMapBoolIntfV(v, e) - case *map[bool]interface{}: - fastpathTV.EncMapBoolIntfV(*v, e) - case map[bool]string: - fastpathTV.EncMapBoolStringV(v, e) - case *map[bool]string: - fastpathTV.EncMapBoolStringV(*v, e) - case map[bool]uint: - fastpathTV.EncMapBoolUintV(v, e) - case *map[bool]uint: - fastpathTV.EncMapBoolUintV(*v, e) - case map[bool]uint8: - fastpathTV.EncMapBoolUint8V(v, e) - case *map[bool]uint8: - fastpathTV.EncMapBoolUint8V(*v, e) - case map[bool]uint16: - fastpathTV.EncMapBoolUint16V(v, e) - case *map[bool]uint16: - fastpathTV.EncMapBoolUint16V(*v, e) - case map[bool]uint32: - fastpathTV.EncMapBoolUint32V(v, e) - case *map[bool]uint32: - fastpathTV.EncMapBoolUint32V(*v, e) - case map[bool]uint64: - fastpathTV.EncMapBoolUint64V(v, e) - case *map[bool]uint64: - fastpathTV.EncMapBoolUint64V(*v, e) - case map[bool]uintptr: - fastpathTV.EncMapBoolUintptrV(v, e) - case *map[bool]uintptr: - fastpathTV.EncMapBoolUintptrV(*v, e) - case map[bool]int: - fastpathTV.EncMapBoolIntV(v, e) - case *map[bool]int: - fastpathTV.EncMapBoolIntV(*v, e) - case map[bool]int8: - fastpathTV.EncMapBoolInt8V(v, e) - case *map[bool]int8: - fastpathTV.EncMapBoolInt8V(*v, e) - case map[bool]int16: - fastpathTV.EncMapBoolInt16V(v, e) - case *map[bool]int16: - fastpathTV.EncMapBoolInt16V(*v, e) - case map[bool]int32: - 
fastpathTV.EncMapBoolInt32V(v, e) - case *map[bool]int32: - fastpathTV.EncMapBoolInt32V(*v, e) - case map[bool]int64: - fastpathTV.EncMapBoolInt64V(v, e) - case *map[bool]int64: - fastpathTV.EncMapBoolInt64V(*v, e) - case map[bool]float32: - fastpathTV.EncMapBoolFloat32V(v, e) - case *map[bool]float32: - fastpathTV.EncMapBoolFloat32V(*v, e) - case map[bool]float64: - fastpathTV.EncMapBoolFloat64V(v, e) - case *map[bool]float64: - fastpathTV.EncMapBoolFloat64V(*v, e) - case map[bool]bool: - fastpathTV.EncMapBoolBoolV(v, e) - case *map[bool]bool: - fastpathTV.EncMapBoolBoolV(*v, e) - - default: - _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 - return false - } - return true -} - -// -- -- fast path functions - -func (e *Encoder) fastpathEncSliceIntfR(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceIntfV(rv2i(rv).([]interface{}), e) - } else { - fastpathTV.EncSliceIntfV(rv2i(rv).([]interface{}), e) - } -} -func (_ fastpathT) EncSliceIntfV(v []interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - e.encode(v2) - } - ee.WriteArrayEnd() -} -func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf(fastpathMapBySliceErrMsg, len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - e.encode(v2) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceStringR(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceStringV(rv2i(rv).([]string), e) - } else { - fastpathTV.EncSliceStringV(rv2i(rv).([]string), e) - } -} -func (_ fastpathT) EncSliceStringV(v []string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v2)) - } else { - ee.EncodeStringEnc(cUTF8, v2) - } - } - ee.WriteArrayEnd() -} -func (_ fastpathT) EncAsMapSliceStringV(v []string, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf(fastpathMapBySliceErrMsg, len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v2)) - } else { - ee.EncodeStringEnc(cUTF8, v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceFloat32R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceFloat32V(rv2i(rv).([]float32), e) - } else { - fastpathTV.EncSliceFloat32V(rv2i(rv).([]float32), e) - } -} -func (_ fastpathT) EncSliceFloat32V(v []float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeFloat32(v2) - } - ee.WriteArrayEnd() -} -func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf(fastpathMapBySliceErrMsg, len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - 
ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeFloat32(v2) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceFloat64R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceFloat64V(rv2i(rv).([]float64), e) - } else { - fastpathTV.EncSliceFloat64V(rv2i(rv).([]float64), e) - } -} -func (_ fastpathT) EncSliceFloat64V(v []float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeFloat64(v2) - } - ee.WriteArrayEnd() -} -func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf(fastpathMapBySliceErrMsg, len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeFloat64(v2) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceUintR(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUintV(rv2i(rv).([]uint), e) - } else { - fastpathTV.EncSliceUintV(rv2i(rv).([]uint), e) - } -} -func (_ fastpathT) EncSliceUintV(v []uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteArrayEnd() -} -func (_ fastpathT) EncAsMapSliceUintV(v []uint, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf(fastpathMapBySliceErrMsg, len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceUint8R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUint8V(rv2i(rv).([]uint8), e) - } else { - fastpathTV.EncSliceUint8V(rv2i(rv).([]uint8), e) - } -} -func (_ fastpathT) EncSliceUint8V(v []uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteArrayEnd() -} -func (_ fastpathT) EncAsMapSliceUint8V(v []uint8, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf(fastpathMapBySliceErrMsg, len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceUint16R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUint16V(rv2i(rv).([]uint16), e) - } else { - fastpathTV.EncSliceUint16V(rv2i(rv).([]uint16), e) - } -} -func (_ fastpathT) EncSliceUint16V(v []uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteArrayEnd() -} -func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf(fastpathMapBySliceErrMsg, 
len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceUint32R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUint32V(rv2i(rv).([]uint32), e) - } else { - fastpathTV.EncSliceUint32V(rv2i(rv).([]uint32), e) - } -} -func (_ fastpathT) EncSliceUint32V(v []uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteArrayEnd() -} -func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf(fastpathMapBySliceErrMsg, len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceUint64R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUint64V(rv2i(rv).([]uint64), e) - } else { - fastpathTV.EncSliceUint64V(rv2i(rv).([]uint64), e) - } -} -func (_ fastpathT) EncSliceUint64V(v []uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteArrayEnd() -} -func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf(fastpathMapBySliceErrMsg, len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeUint(uint64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceUintptrR(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUintptrV(rv2i(rv).([]uintptr), e) - } else { - fastpathTV.EncSliceUintptrV(rv2i(rv).([]uintptr), e) - } -} -func (_ fastpathT) EncSliceUintptrV(v []uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - e.encode(v2) - } - ee.WriteArrayEnd() -} -func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf(fastpathMapBySliceErrMsg, len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - e.encode(v2) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceIntR(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceIntV(rv2i(rv).([]int), e) - } else { - fastpathTV.EncSliceIntV(rv2i(rv).([]int), e) - } -} -func (_ fastpathT) EncSliceIntV(v []int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeInt(int64(v2)) - } - ee.WriteArrayEnd() -} -func (_ fastpathT) EncAsMapSliceIntV(v []int, e *Encoder) { - ee, esep := 
e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf(fastpathMapBySliceErrMsg, len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeInt(int64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceInt8R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt8V(rv2i(rv).([]int8), e) - } else { - fastpathTV.EncSliceInt8V(rv2i(rv).([]int8), e) - } -} -func (_ fastpathT) EncSliceInt8V(v []int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeInt(int64(v2)) - } - ee.WriteArrayEnd() -} -func (_ fastpathT) EncAsMapSliceInt8V(v []int8, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf(fastpathMapBySliceErrMsg, len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeInt(int64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceInt16R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt16V(rv2i(rv).([]int16), e) - } else { - fastpathTV.EncSliceInt16V(rv2i(rv).([]int16), e) - } -} -func (_ fastpathT) EncSliceInt16V(v []int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeInt(int64(v2)) - } - ee.WriteArrayEnd() -} -func (_ fastpathT) EncAsMapSliceInt16V(v []int16, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf(fastpathMapBySliceErrMsg, len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeInt(int64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceInt32R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt32V(rv2i(rv).([]int32), e) - } else { - fastpathTV.EncSliceInt32V(rv2i(rv).([]int32), e) - } -} -func (_ fastpathT) EncSliceInt32V(v []int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeInt(int64(v2)) - } - ee.WriteArrayEnd() -} -func (_ fastpathT) EncAsMapSliceInt32V(v []int32, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf(fastpathMapBySliceErrMsg, len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeInt(int64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceInt64R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt64V(rv2i(rv).([]int64), e) - } else { - fastpathTV.EncSliceInt64V(rv2i(rv).([]int64), e) - } -} -func (_ fastpathT) EncSliceInt64V(v []int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeInt(int64(v2)) - } - ee.WriteArrayEnd() -} -func 
(_ fastpathT) EncAsMapSliceInt64V(v []int64, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf(fastpathMapBySliceErrMsg, len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeInt(int64(v2)) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncSliceBoolR(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceBoolV(rv2i(rv).([]bool), e) - } else { - fastpathTV.EncSliceBoolV(rv2i(rv).([]bool), e) - } -} -func (_ fastpathT) EncSliceBoolV(v []bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { - ee.WriteArrayElem() - } - ee.EncodeBool(v2) - } - ee.WriteArrayEnd() -} -func (_ fastpathT) EncAsMapSliceBoolV(v []bool, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf(fastpathMapBySliceErrMsg, len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - ee.EncodeBool(v2) - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfIntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), e) -} -func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l uint - var vp *bytesI - for k2 := range v { - l = uint(len(mksv)) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfStringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfStringV(rv2i(rv).(map[interface{}]string), e) -} -func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l uint - var vp *bytesI - for k2 := range v { - l = uint(len(mksv)) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v2)) - } else { - ee.EncodeStringEnc(cUTF8, v2) - } - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfUintR(f *codecFnInfo, rv reflect.Value) { - 
fastpathTV.EncMapIntfUintV(rv2i(rv).(map[interface{}]uint), e) -} -func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l uint - var vp *bytesI - for k2 := range v { - l = uint(len(mksv)) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfUint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), e) -} -func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l uint - var vp *bytesI - for k2 := range v { - l = uint(len(mksv)) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfUint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), e) -} -func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l uint - var vp *bytesI - for k2 := range v { - l = uint(len(mksv)) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfUint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), e) -} -func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := 
make([]bytesI, len(v)) - var i, l uint - var vp *bytesI - for k2 := range v { - l = uint(len(mksv)) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfUint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), e) -} -func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l uint - var vp *bytesI - for k2 := range v { - l = uint(len(mksv)) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), e) -} -func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l uint - var vp *bytesI - for k2 := range v { - l = uint(len(mksv)) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntfIntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntfIntV(rv2i(rv).(map[interface{}]int), e) -} -func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l uint - var vp *bytesI - for k2 := range v { - l = uint(len(mksv)) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.asis(v2[j].v) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - 
[Removed generated fast-path map encoders, continued: this hunk deletes the fastpathEncMap*R wrappers and the matching fastpathT.EncMap*V bodies for interface{}, string, float32, float64, uint, and uint8 keys paired with interface{}, string, uint, uint8, uint16, uint32, uint64, uintptr, int, int8, int16, int32, int64, float32, float64, and bool values. Every deleted function has the same shape: encode nil for a nil map, WriteMapStart(len(v)), then, when e.h.Canonical is set, sort the keys (via a temporary byte-slice encoding for interface{} keys, or stringSlice/floatSlice/uintSlice for typed keys) and emit entries in that order, otherwise range the map directly; each key and value is wrapped in WriteMapElemKey/WriteMapElemValue when element separators are in use, and the function closes with WriteMapEnd().]
if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v[uint8(k2)])) - } else { - ee.EncodeStringEnc(cUTF8, v[uint8(k2)]) - } - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v2)) - } else { - ee.EncodeStringEnc(cUTF8, v2) - } - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8UintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8UintV(rv2i(rv).(map[uint8]uint), e) -} -func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) -} -func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), e) -} -func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), e) -} -func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := 
range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) -} -func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), e) -} -func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) -} -func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Int8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Int8V(rv2i(rv).(map[uint8]int8), e) -} -func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - 
ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Int16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Int16V(rv2i(rv).(map[uint8]int16), e) -} -func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) -} -func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Int64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Int64V(rv2i(rv).(map[uint8]int64), e) -} -func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Float32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Float32V(rv2i(rv).(map[uint8]float32), e) -} -func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() 
- } - ee.EncodeFloat32(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) -} -func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint8BoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) -} -func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), e) -} -func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16StringV(rv2i(rv).(map[uint16]string), e) -} -func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v[uint16(k2)])) - } else { - ee.EncodeStringEnc(cUTF8, 
v[uint16(k2)]) - } - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v2)) - } else { - ee.EncodeStringEnc(cUTF8, v2) - } - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16UintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16UintV(rv2i(rv).(map[uint16]uint), e) -} -func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), e) -} -func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), e) -} -func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), e) -} -func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - 
ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), e) -} -func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), e) -} -func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16IntV(rv2i(rv).(map[uint16]int), e) -} -func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Int8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Int8V(rv2i(rv).(map[uint16]int8), e) -} -func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 
:= range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Int16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Int16V(rv2i(rv).(map[uint16]int16), e) -} -func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Int32V(rv2i(rv).(map[uint16]int32), e) -} -func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Int64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Int64V(rv2i(rv).(map[uint16]int64), e) -} -func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Float32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Float32V(rv2i(rv).(map[uint16]float32), e) -} -func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - 
ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16Float64V(rv2i(rv).(map[uint16]float64), e) -} -func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint16BoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint16BoolV(rv2i(rv).(map[uint16]bool), e) -} -func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), e) -} -func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32StringV(rv2i(rv).(map[uint32]string), e) -} -func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v[uint32(k2)])) - } else { - ee.EncodeStringEnc(cUTF8, v[uint32(k2)]) - } - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - 
ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v2)) - } else { - ee.EncodeStringEnc(cUTF8, v2) - } - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32UintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32UintV(rv2i(rv).(map[uint32]uint), e) -} -func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), e) -} -func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), e) -} -func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), e) -} -func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - 
ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), e) -} -func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), e) -} -func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32IntV(rv2i(rv).(map[uint32]int), e) -} -func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Int8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Int8V(rv2i(rv).(map[uint32]int8), e) -} -func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } 
- ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Int16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Int16V(rv2i(rv).(map[uint32]int16), e) -} -func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Int32V(rv2i(rv).(map[uint32]int32), e) -} -func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Int64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Int64V(rv2i(rv).(map[uint32]int64), e) -} -func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32Float32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Float32V(rv2i(rv).(map[uint32]float32), e) -} -func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e 
*Encoder) fastpathEncMapUint32Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32Float64V(rv2i(rv).(map[uint32]float64), e) -} -func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint32BoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint32BoolV(rv2i(rv).(map[uint32]bool), e) -} -func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) -} -func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) -} -func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v[uint64(k2)])) - } else { - ee.EncodeStringEnc(cUTF8, v[uint64(k2)]) - } - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v2)) - } 
else { - ee.EncodeStringEnc(cUTF8, v2) - } - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64UintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64UintV(rv2i(rv).(map[uint64]uint), e) -} -func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) -} -func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), e) -} -func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), e) -} -func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - 
ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) -} -func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), e) -} -func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) -} -func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Int8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Int8V(rv2i(rv).(map[uint64]int8), e) -} -func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Int16R(f *codecFnInfo, rv 
reflect.Value) { - fastpathTV.EncMapUint64Int16V(rv2i(rv).(map[uint64]int16), e) -} -func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) -} -func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Int64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Int64V(rv2i(rv).(map[uint64]int64), e) -} -func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Float32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Float32V(rv2i(rv).(map[uint64]float32), e) -} -func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64Float64R(f *codecFnInfo, rv reflect.Value) { - 
fastpathTV.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) -} -func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUint64BoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) -} -func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(uint64(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeUint(uint64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), e) -} -func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrStringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrStringV(rv2i(rv).(map[uintptr]string), e) -} -func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v[uintptr(k2)])) - } else { - ee.EncodeStringEnc(cUTF8, v[uintptr(k2)]) - } - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v2)) - } else { - ee.EncodeStringEnc(cUTF8, v2) - } - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) 
fastpathEncMapUintptrUintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrUintV(rv2i(rv).(map[uintptr]uint), e) -} -func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), e) -} -func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), e) -} -func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), e) -} -func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), e) 
-} -func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), e) -} -func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrIntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrIntV(rv2i(rv).(map[uintptr]int), e) -} -func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), e) -} -func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), e) -} -func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - 
ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), e) -} -func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), e) -} -func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), e) -} -func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), e) -} -func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } 
- sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), e) -} -func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - e.encode(uintptr(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - e.encode(k2) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntIntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) -} -func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[int(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntStringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntStringV(rv2i(rv).(map[int]string), e) -} -func (_ fastpathT) EncMapIntStringV(v map[int]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v[int(k2)])) - } else { - ee.EncodeStringEnc(cUTF8, v[int(k2)]) - } - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v2)) - } else { - ee.EncodeStringEnc(cUTF8, v2) - } - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntUintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntUintV(rv2i(rv).(map[int]uint), e) -} -func (_ fastpathT) EncMapIntUintV(v map[int]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { 
- if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntUint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) -} -func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntUint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntUint16V(rv2i(rv).(map[int]uint16), e) -} -func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntUint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntUint32V(rv2i(rv).(map[int]uint32), e) -} -func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntUint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) -} -func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int(k2)])) - } - } else 
{ - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntUintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntUintptrV(rv2i(rv).(map[int]uintptr), e) -} -func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[int(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntIntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntIntV(rv2i(rv).(map[int]int), e) -} -func (_ fastpathT) EncMapIntIntV(v map[int]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntInt8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntInt8V(rv2i(rv).(map[int]int8), e) -} -func (_ fastpathT) EncMapIntInt8V(v map[int]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntInt16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntInt16V(rv2i(rv).(map[int]int16), e) -} -func (_ fastpathT) EncMapIntInt16V(v map[int]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) 
fastpathEncMapIntInt32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) -} -func (_ fastpathT) EncMapIntInt32V(v map[int]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntInt64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntInt64V(rv2i(rv).(map[int]int64), e) -} -func (_ fastpathT) EncMapIntInt64V(v map[int]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntFloat32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntFloat32V(rv2i(rv).(map[int]float32), e) -} -func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[int(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntFloat64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) -} -func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[int(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapIntBoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) -} -func (_ fastpathT) EncMapIntBoolV(v map[int]bool, e *Encoder) { 
- if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[int(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8IntfV(rv2i(rv).(map[int8]interface{}), e) -} -func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8StringV(rv2i(rv).(map[int8]string), e) -} -func (_ fastpathT) EncMapInt8StringV(v map[int8]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v[int8(k2)])) - } else { - ee.EncodeStringEnc(cUTF8, v[int8(k2)]) - } - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v2)) - } else { - ee.EncodeStringEnc(cUTF8, v2) - } - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8UintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8UintV(rv2i(rv).(map[int8]uint), e) -} -func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Uint8V(rv2i(rv).(map[int8]uint8), e) -} -func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, e *Encoder) { - if v 
== nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Uint16V(rv2i(rv).(map[int8]uint16), e) -} -func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Uint32V(rv2i(rv).(map[int8]uint32), e) -} -func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Uint64V(rv2i(rv).(map[int8]uint64), e) -} -func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), e) -} -func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if 
e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8IntV(rv2i(rv).(map[int8]int), e) -} -func (_ fastpathT) EncMapInt8IntV(v map[int8]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Int8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Int8V(rv2i(rv).(map[int8]int8), e) -} -func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Int16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Int16V(rv2i(rv).(map[int8]int16), e) -} -func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Int32V(rv2i(rv).(map[int8]int32), e) -} -func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } 
- ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Int64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Int64V(rv2i(rv).(map[int8]int64), e) -} -func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Float32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Float32V(rv2i(rv).(map[int8]float32), e) -} -func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8Float64V(rv2i(rv).(map[int8]float64), e) -} -func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt8BoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt8BoolV(rv2i(rv).(map[int8]bool), e) -} -func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int8(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - 
ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16IntfV(rv2i(rv).(map[int16]interface{}), e) -} -func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16StringV(rv2i(rv).(map[int16]string), e) -} -func (_ fastpathT) EncMapInt16StringV(v map[int16]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v[int16(k2)])) - } else { - ee.EncodeStringEnc(cUTF8, v[int16(k2)]) - } - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v2)) - } else { - ee.EncodeStringEnc(cUTF8, v2) - } - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16UintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16UintV(rv2i(rv).(map[int16]uint), e) -} -func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Uint8V(rv2i(rv).(map[int16]uint8), e) -} -func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for 
k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Uint16V(rv2i(rv).(map[int16]uint16), e) -} -func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Uint32V(rv2i(rv).(map[int16]uint32), e) -} -func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Uint64V(rv2i(rv).(map[int16]uint64), e) -} -func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), e) -} -func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - 
ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16IntV(rv2i(rv).(map[int16]int), e) -} -func (_ fastpathT) EncMapInt16IntV(v map[int16]int, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Int8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Int8V(rv2i(rv).(map[int16]int8), e) -} -func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Int16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Int16V(rv2i(rv).(map[int16]int16), e) -} -func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Int32V(rv2i(rv).(map[int16]int32), e) -} -func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Int64R(f *codecFnInfo, rv 
reflect.Value) { - fastpathTV.EncMapInt16Int64V(rv2i(rv).(map[int16]int64), e) -} -func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeInt(int64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Float32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Float32V(rv2i(rv).(map[int16]float32), e) -} -func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat32(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16Float64V(rv2i(rv).(map[int16]float64), e) -} -func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeFloat64(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt16BoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt16BoolV(rv2i(rv).(map[int16]bool), e) -} -func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int16(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeBool(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) -} -func (_ fastpathT) EncMapInt32IntfV(v 
map[int32]interface{}, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - e.encode(v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - e.encode(v2) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) -} -func (_ fastpathT) EncMapInt32StringV(v map[int32]string, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v[int32(k2)])) - } else { - ee.EncodeStringEnc(cUTF8, v[int32(k2)]) - } - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - if e.h.StringToRaw { - ee.EncodeStringBytesRaw(bytesView(v2)) - } else { - ee.EncodeStringEnc(cUTF8, v2) - } - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32UintR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32UintV(rv2i(rv).(map[int32]uint), e) -} -func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) -} -func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, e *Encoder) { - if v == nil { - e.e.EncodeNil() - return - } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i uint - for k := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(int32(k2))) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if esep { - ee.WriteMapElemKey() - } - ee.EncodeInt(int64(k2)) - if esep { - ee.WriteMapElemValue() - } - ee.EncodeUint(uint64(v2)) - } - } - ee.WriteMapEnd() -} - -func (e *Encoder) fastpathEncMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32Uint16V(rv2i(rv).(map[int32]uint16), e) -} 
[fast-path.generated.go deletion, continued: the rest of the generated encode fast path. This part of the hunk removes the map encoders for int32-, int64- and bool-keyed maps (the EncMapInt32*V, EncMapInt64*V and EncMapBool*V helpers plus their *R dispatch wrappers). Every helper follows the same template: return early via EncodeNil for a nil map, call WriteMapStart(len(v)), then either range over the map directly or, when the handle's Canonical option is set, copy the keys out, sort them, and emit key/value pairs in sorted order so that equal maps always encode to identical bytes. The encode fast path ends here and the generated decode fast path begins.]
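The deleted encoders above all share one behavior worth keeping in mind when reviewing this regeneration: with Canonical set, the map keys are materialized into a slice and sorted before anything is written. What follows is a minimal, self-contained sketch of that pattern, not code from the codec package; encodeCanonical and its emit callback are made-up stand-ins for the generated helpers and their WriteMapElemKey/WriteMapElemValue calls.

package main

import (
	"fmt"
	"sort"
)

// encodeCanonical emits the entries of m in ascending key order, the same
// strategy the deleted EncMap*V helpers use when the handle's Canonical
// option is set, so that two equal maps always serialize identically.
func encodeCanonical(m map[int32]uint64, emit func(k int32, v uint64)) {
	keys := make([]int32, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
	for _, k := range keys {
		emit(k, m[k])
	}
}

func main() {
	m := map[int32]uint64{3: 30, 1: 10, 2: 20}
	encodeCanonical(m, func(k int32, v uint64) {
		fmt.Printf("key=%d value=%d\n", k, v)
	})
}

Sorting costs an extra O(n log n) pass per map, which is why the generated code only takes this branch when the handle explicitly asks for canonical output.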
[fast-path.generated.go deletion, continued: fastpathDecodeTypeSwitch, a single type switch over the supported concrete container types. The value cases ([]T, map[K]V) decode in place, while the pointer cases (*[]T, *map[K]V) call the matching fastpathTV.Dec*V helper and assign the result back through the pointer when the helper reports a change. The cases removed in this portion of the hunk cover slices of the built-in scalar types and maps keyed by interface{}, string, float32/float64 and the unsigned integer types.]
*map[uint64]uint64: - var v2 map[uint64]uint64 - v2, changed = fastpathTV.DecMapUint64Uint64V(*v, true, d) - if changed { - *v = v2 - } - case map[uint64]uintptr: - fastpathTV.DecMapUint64UintptrV(v, false, d) - case *map[uint64]uintptr: - var v2 map[uint64]uintptr - v2, changed = fastpathTV.DecMapUint64UintptrV(*v, true, d) - if changed { - *v = v2 - } - case map[uint64]int: - fastpathTV.DecMapUint64IntV(v, false, d) - case *map[uint64]int: - var v2 map[uint64]int - v2, changed = fastpathTV.DecMapUint64IntV(*v, true, d) - if changed { - *v = v2 - } - case map[uint64]int8: - fastpathTV.DecMapUint64Int8V(v, false, d) - case *map[uint64]int8: - var v2 map[uint64]int8 - v2, changed = fastpathTV.DecMapUint64Int8V(*v, true, d) - if changed { - *v = v2 - } - case map[uint64]int16: - fastpathTV.DecMapUint64Int16V(v, false, d) - case *map[uint64]int16: - var v2 map[uint64]int16 - v2, changed = fastpathTV.DecMapUint64Int16V(*v, true, d) - if changed { - *v = v2 - } - case map[uint64]int32: - fastpathTV.DecMapUint64Int32V(v, false, d) - case *map[uint64]int32: - var v2 map[uint64]int32 - v2, changed = fastpathTV.DecMapUint64Int32V(*v, true, d) - if changed { - *v = v2 - } - case map[uint64]int64: - fastpathTV.DecMapUint64Int64V(v, false, d) - case *map[uint64]int64: - var v2 map[uint64]int64 - v2, changed = fastpathTV.DecMapUint64Int64V(*v, true, d) - if changed { - *v = v2 - } - case map[uint64]float32: - fastpathTV.DecMapUint64Float32V(v, false, d) - case *map[uint64]float32: - var v2 map[uint64]float32 - v2, changed = fastpathTV.DecMapUint64Float32V(*v, true, d) - if changed { - *v = v2 - } - case map[uint64]float64: - fastpathTV.DecMapUint64Float64V(v, false, d) - case *map[uint64]float64: - var v2 map[uint64]float64 - v2, changed = fastpathTV.DecMapUint64Float64V(*v, true, d) - if changed { - *v = v2 - } - case map[uint64]bool: - fastpathTV.DecMapUint64BoolV(v, false, d) - case *map[uint64]bool: - var v2 map[uint64]bool - v2, changed = fastpathTV.DecMapUint64BoolV(*v, true, d) - if changed { - *v = v2 - } - case map[uintptr]interface{}: - fastpathTV.DecMapUintptrIntfV(v, false, d) - case *map[uintptr]interface{}: - var v2 map[uintptr]interface{} - v2, changed = fastpathTV.DecMapUintptrIntfV(*v, true, d) - if changed { - *v = v2 - } - case map[uintptr]string: - fastpathTV.DecMapUintptrStringV(v, false, d) - case *map[uintptr]string: - var v2 map[uintptr]string - v2, changed = fastpathTV.DecMapUintptrStringV(*v, true, d) - if changed { - *v = v2 - } - case map[uintptr]uint: - fastpathTV.DecMapUintptrUintV(v, false, d) - case *map[uintptr]uint: - var v2 map[uintptr]uint - v2, changed = fastpathTV.DecMapUintptrUintV(*v, true, d) - if changed { - *v = v2 - } - case map[uintptr]uint8: - fastpathTV.DecMapUintptrUint8V(v, false, d) - case *map[uintptr]uint8: - var v2 map[uintptr]uint8 - v2, changed = fastpathTV.DecMapUintptrUint8V(*v, true, d) - if changed { - *v = v2 - } - case map[uintptr]uint16: - fastpathTV.DecMapUintptrUint16V(v, false, d) - case *map[uintptr]uint16: - var v2 map[uintptr]uint16 - v2, changed = fastpathTV.DecMapUintptrUint16V(*v, true, d) - if changed { - *v = v2 - } - case map[uintptr]uint32: - fastpathTV.DecMapUintptrUint32V(v, false, d) - case *map[uintptr]uint32: - var v2 map[uintptr]uint32 - v2, changed = fastpathTV.DecMapUintptrUint32V(*v, true, d) - if changed { - *v = v2 - } - case map[uintptr]uint64: - fastpathTV.DecMapUintptrUint64V(v, false, d) - case *map[uintptr]uint64: - var v2 map[uintptr]uint64 - v2, changed = fastpathTV.DecMapUintptrUint64V(*v, true, d) - if 
changed { - *v = v2 - } - case map[uintptr]uintptr: - fastpathTV.DecMapUintptrUintptrV(v, false, d) - case *map[uintptr]uintptr: - var v2 map[uintptr]uintptr - v2, changed = fastpathTV.DecMapUintptrUintptrV(*v, true, d) - if changed { - *v = v2 - } - case map[uintptr]int: - fastpathTV.DecMapUintptrIntV(v, false, d) - case *map[uintptr]int: - var v2 map[uintptr]int - v2, changed = fastpathTV.DecMapUintptrIntV(*v, true, d) - if changed { - *v = v2 - } - case map[uintptr]int8: - fastpathTV.DecMapUintptrInt8V(v, false, d) - case *map[uintptr]int8: - var v2 map[uintptr]int8 - v2, changed = fastpathTV.DecMapUintptrInt8V(*v, true, d) - if changed { - *v = v2 - } - case map[uintptr]int16: - fastpathTV.DecMapUintptrInt16V(v, false, d) - case *map[uintptr]int16: - var v2 map[uintptr]int16 - v2, changed = fastpathTV.DecMapUintptrInt16V(*v, true, d) - if changed { - *v = v2 - } - case map[uintptr]int32: - fastpathTV.DecMapUintptrInt32V(v, false, d) - case *map[uintptr]int32: - var v2 map[uintptr]int32 - v2, changed = fastpathTV.DecMapUintptrInt32V(*v, true, d) - if changed { - *v = v2 - } - case map[uintptr]int64: - fastpathTV.DecMapUintptrInt64V(v, false, d) - case *map[uintptr]int64: - var v2 map[uintptr]int64 - v2, changed = fastpathTV.DecMapUintptrInt64V(*v, true, d) - if changed { - *v = v2 - } - case map[uintptr]float32: - fastpathTV.DecMapUintptrFloat32V(v, false, d) - case *map[uintptr]float32: - var v2 map[uintptr]float32 - v2, changed = fastpathTV.DecMapUintptrFloat32V(*v, true, d) - if changed { - *v = v2 - } - case map[uintptr]float64: - fastpathTV.DecMapUintptrFloat64V(v, false, d) - case *map[uintptr]float64: - var v2 map[uintptr]float64 - v2, changed = fastpathTV.DecMapUintptrFloat64V(*v, true, d) - if changed { - *v = v2 - } - case map[uintptr]bool: - fastpathTV.DecMapUintptrBoolV(v, false, d) - case *map[uintptr]bool: - var v2 map[uintptr]bool - v2, changed = fastpathTV.DecMapUintptrBoolV(*v, true, d) - if changed { - *v = v2 - } - case map[int]interface{}: - fastpathTV.DecMapIntIntfV(v, false, d) - case *map[int]interface{}: - var v2 map[int]interface{} - v2, changed = fastpathTV.DecMapIntIntfV(*v, true, d) - if changed { - *v = v2 - } - case map[int]string: - fastpathTV.DecMapIntStringV(v, false, d) - case *map[int]string: - var v2 map[int]string - v2, changed = fastpathTV.DecMapIntStringV(*v, true, d) - if changed { - *v = v2 - } - case map[int]uint: - fastpathTV.DecMapIntUintV(v, false, d) - case *map[int]uint: - var v2 map[int]uint - v2, changed = fastpathTV.DecMapIntUintV(*v, true, d) - if changed { - *v = v2 - } - case map[int]uint8: - fastpathTV.DecMapIntUint8V(v, false, d) - case *map[int]uint8: - var v2 map[int]uint8 - v2, changed = fastpathTV.DecMapIntUint8V(*v, true, d) - if changed { - *v = v2 - } - case map[int]uint16: - fastpathTV.DecMapIntUint16V(v, false, d) - case *map[int]uint16: - var v2 map[int]uint16 - v2, changed = fastpathTV.DecMapIntUint16V(*v, true, d) - if changed { - *v = v2 - } - case map[int]uint32: - fastpathTV.DecMapIntUint32V(v, false, d) - case *map[int]uint32: - var v2 map[int]uint32 - v2, changed = fastpathTV.DecMapIntUint32V(*v, true, d) - if changed { - *v = v2 - } - case map[int]uint64: - fastpathTV.DecMapIntUint64V(v, false, d) - case *map[int]uint64: - var v2 map[int]uint64 - v2, changed = fastpathTV.DecMapIntUint64V(*v, true, d) - if changed { - *v = v2 - } - case map[int]uintptr: - fastpathTV.DecMapIntUintptrV(v, false, d) - case *map[int]uintptr: - var v2 map[int]uintptr - v2, changed = fastpathTV.DecMapIntUintptrV(*v, true, d) - if changed 
{ - *v = v2 - } - case map[int]int: - fastpathTV.DecMapIntIntV(v, false, d) - case *map[int]int: - var v2 map[int]int - v2, changed = fastpathTV.DecMapIntIntV(*v, true, d) - if changed { - *v = v2 - } - case map[int]int8: - fastpathTV.DecMapIntInt8V(v, false, d) - case *map[int]int8: - var v2 map[int]int8 - v2, changed = fastpathTV.DecMapIntInt8V(*v, true, d) - if changed { - *v = v2 - } - case map[int]int16: - fastpathTV.DecMapIntInt16V(v, false, d) - case *map[int]int16: - var v2 map[int]int16 - v2, changed = fastpathTV.DecMapIntInt16V(*v, true, d) - if changed { - *v = v2 - } - case map[int]int32: - fastpathTV.DecMapIntInt32V(v, false, d) - case *map[int]int32: - var v2 map[int]int32 - v2, changed = fastpathTV.DecMapIntInt32V(*v, true, d) - if changed { - *v = v2 - } - case map[int]int64: - fastpathTV.DecMapIntInt64V(v, false, d) - case *map[int]int64: - var v2 map[int]int64 - v2, changed = fastpathTV.DecMapIntInt64V(*v, true, d) - if changed { - *v = v2 - } - case map[int]float32: - fastpathTV.DecMapIntFloat32V(v, false, d) - case *map[int]float32: - var v2 map[int]float32 - v2, changed = fastpathTV.DecMapIntFloat32V(*v, true, d) - if changed { - *v = v2 - } - case map[int]float64: - fastpathTV.DecMapIntFloat64V(v, false, d) - case *map[int]float64: - var v2 map[int]float64 - v2, changed = fastpathTV.DecMapIntFloat64V(*v, true, d) - if changed { - *v = v2 - } - case map[int]bool: - fastpathTV.DecMapIntBoolV(v, false, d) - case *map[int]bool: - var v2 map[int]bool - v2, changed = fastpathTV.DecMapIntBoolV(*v, true, d) - if changed { - *v = v2 - } - case map[int8]interface{}: - fastpathTV.DecMapInt8IntfV(v, false, d) - case *map[int8]interface{}: - var v2 map[int8]interface{} - v2, changed = fastpathTV.DecMapInt8IntfV(*v, true, d) - if changed { - *v = v2 - } - case map[int8]string: - fastpathTV.DecMapInt8StringV(v, false, d) - case *map[int8]string: - var v2 map[int8]string - v2, changed = fastpathTV.DecMapInt8StringV(*v, true, d) - if changed { - *v = v2 - } - case map[int8]uint: - fastpathTV.DecMapInt8UintV(v, false, d) - case *map[int8]uint: - var v2 map[int8]uint - v2, changed = fastpathTV.DecMapInt8UintV(*v, true, d) - if changed { - *v = v2 - } - case map[int8]uint8: - fastpathTV.DecMapInt8Uint8V(v, false, d) - case *map[int8]uint8: - var v2 map[int8]uint8 - v2, changed = fastpathTV.DecMapInt8Uint8V(*v, true, d) - if changed { - *v = v2 - } - case map[int8]uint16: - fastpathTV.DecMapInt8Uint16V(v, false, d) - case *map[int8]uint16: - var v2 map[int8]uint16 - v2, changed = fastpathTV.DecMapInt8Uint16V(*v, true, d) - if changed { - *v = v2 - } - case map[int8]uint32: - fastpathTV.DecMapInt8Uint32V(v, false, d) - case *map[int8]uint32: - var v2 map[int8]uint32 - v2, changed = fastpathTV.DecMapInt8Uint32V(*v, true, d) - if changed { - *v = v2 - } - case map[int8]uint64: - fastpathTV.DecMapInt8Uint64V(v, false, d) - case *map[int8]uint64: - var v2 map[int8]uint64 - v2, changed = fastpathTV.DecMapInt8Uint64V(*v, true, d) - if changed { - *v = v2 - } - case map[int8]uintptr: - fastpathTV.DecMapInt8UintptrV(v, false, d) - case *map[int8]uintptr: - var v2 map[int8]uintptr - v2, changed = fastpathTV.DecMapInt8UintptrV(*v, true, d) - if changed { - *v = v2 - } - case map[int8]int: - fastpathTV.DecMapInt8IntV(v, false, d) - case *map[int8]int: - var v2 map[int8]int - v2, changed = fastpathTV.DecMapInt8IntV(*v, true, d) - if changed { - *v = v2 - } - case map[int8]int8: - fastpathTV.DecMapInt8Int8V(v, false, d) - case *map[int8]int8: - var v2 map[int8]int8 - v2, changed = 
fastpathTV.DecMapInt8Int8V(*v, true, d) - if changed { - *v = v2 - } - case map[int8]int16: - fastpathTV.DecMapInt8Int16V(v, false, d) - case *map[int8]int16: - var v2 map[int8]int16 - v2, changed = fastpathTV.DecMapInt8Int16V(*v, true, d) - if changed { - *v = v2 - } - case map[int8]int32: - fastpathTV.DecMapInt8Int32V(v, false, d) - case *map[int8]int32: - var v2 map[int8]int32 - v2, changed = fastpathTV.DecMapInt8Int32V(*v, true, d) - if changed { - *v = v2 - } - case map[int8]int64: - fastpathTV.DecMapInt8Int64V(v, false, d) - case *map[int8]int64: - var v2 map[int8]int64 - v2, changed = fastpathTV.DecMapInt8Int64V(*v, true, d) - if changed { - *v = v2 - } - case map[int8]float32: - fastpathTV.DecMapInt8Float32V(v, false, d) - case *map[int8]float32: - var v2 map[int8]float32 - v2, changed = fastpathTV.DecMapInt8Float32V(*v, true, d) - if changed { - *v = v2 - } - case map[int8]float64: - fastpathTV.DecMapInt8Float64V(v, false, d) - case *map[int8]float64: - var v2 map[int8]float64 - v2, changed = fastpathTV.DecMapInt8Float64V(*v, true, d) - if changed { - *v = v2 - } - case map[int8]bool: - fastpathTV.DecMapInt8BoolV(v, false, d) - case *map[int8]bool: - var v2 map[int8]bool - v2, changed = fastpathTV.DecMapInt8BoolV(*v, true, d) - if changed { - *v = v2 - } - case map[int16]interface{}: - fastpathTV.DecMapInt16IntfV(v, false, d) - case *map[int16]interface{}: - var v2 map[int16]interface{} - v2, changed = fastpathTV.DecMapInt16IntfV(*v, true, d) - if changed { - *v = v2 - } - case map[int16]string: - fastpathTV.DecMapInt16StringV(v, false, d) - case *map[int16]string: - var v2 map[int16]string - v2, changed = fastpathTV.DecMapInt16StringV(*v, true, d) - if changed { - *v = v2 - } - case map[int16]uint: - fastpathTV.DecMapInt16UintV(v, false, d) - case *map[int16]uint: - var v2 map[int16]uint - v2, changed = fastpathTV.DecMapInt16UintV(*v, true, d) - if changed { - *v = v2 - } - case map[int16]uint8: - fastpathTV.DecMapInt16Uint8V(v, false, d) - case *map[int16]uint8: - var v2 map[int16]uint8 - v2, changed = fastpathTV.DecMapInt16Uint8V(*v, true, d) - if changed { - *v = v2 - } - case map[int16]uint16: - fastpathTV.DecMapInt16Uint16V(v, false, d) - case *map[int16]uint16: - var v2 map[int16]uint16 - v2, changed = fastpathTV.DecMapInt16Uint16V(*v, true, d) - if changed { - *v = v2 - } - case map[int16]uint32: - fastpathTV.DecMapInt16Uint32V(v, false, d) - case *map[int16]uint32: - var v2 map[int16]uint32 - v2, changed = fastpathTV.DecMapInt16Uint32V(*v, true, d) - if changed { - *v = v2 - } - case map[int16]uint64: - fastpathTV.DecMapInt16Uint64V(v, false, d) - case *map[int16]uint64: - var v2 map[int16]uint64 - v2, changed = fastpathTV.DecMapInt16Uint64V(*v, true, d) - if changed { - *v = v2 - } - case map[int16]uintptr: - fastpathTV.DecMapInt16UintptrV(v, false, d) - case *map[int16]uintptr: - var v2 map[int16]uintptr - v2, changed = fastpathTV.DecMapInt16UintptrV(*v, true, d) - if changed { - *v = v2 - } - case map[int16]int: - fastpathTV.DecMapInt16IntV(v, false, d) - case *map[int16]int: - var v2 map[int16]int - v2, changed = fastpathTV.DecMapInt16IntV(*v, true, d) - if changed { - *v = v2 - } - case map[int16]int8: - fastpathTV.DecMapInt16Int8V(v, false, d) - case *map[int16]int8: - var v2 map[int16]int8 - v2, changed = fastpathTV.DecMapInt16Int8V(*v, true, d) - if changed { - *v = v2 - } - case map[int16]int16: - fastpathTV.DecMapInt16Int16V(v, false, d) - case *map[int16]int16: - var v2 map[int16]int16 - v2, changed = fastpathTV.DecMapInt16Int16V(*v, true, d) - if changed { - 
*v = v2 - } - case map[int16]int32: - fastpathTV.DecMapInt16Int32V(v, false, d) - case *map[int16]int32: - var v2 map[int16]int32 - v2, changed = fastpathTV.DecMapInt16Int32V(*v, true, d) - if changed { - *v = v2 - } - case map[int16]int64: - fastpathTV.DecMapInt16Int64V(v, false, d) - case *map[int16]int64: - var v2 map[int16]int64 - v2, changed = fastpathTV.DecMapInt16Int64V(*v, true, d) - if changed { - *v = v2 - } - case map[int16]float32: - fastpathTV.DecMapInt16Float32V(v, false, d) - case *map[int16]float32: - var v2 map[int16]float32 - v2, changed = fastpathTV.DecMapInt16Float32V(*v, true, d) - if changed { - *v = v2 - } - case map[int16]float64: - fastpathTV.DecMapInt16Float64V(v, false, d) - case *map[int16]float64: - var v2 map[int16]float64 - v2, changed = fastpathTV.DecMapInt16Float64V(*v, true, d) - if changed { - *v = v2 - } - case map[int16]bool: - fastpathTV.DecMapInt16BoolV(v, false, d) - case *map[int16]bool: - var v2 map[int16]bool - v2, changed = fastpathTV.DecMapInt16BoolV(*v, true, d) - if changed { - *v = v2 - } - case map[int32]interface{}: - fastpathTV.DecMapInt32IntfV(v, false, d) - case *map[int32]interface{}: - var v2 map[int32]interface{} - v2, changed = fastpathTV.DecMapInt32IntfV(*v, true, d) - if changed { - *v = v2 - } - case map[int32]string: - fastpathTV.DecMapInt32StringV(v, false, d) - case *map[int32]string: - var v2 map[int32]string - v2, changed = fastpathTV.DecMapInt32StringV(*v, true, d) - if changed { - *v = v2 - } - case map[int32]uint: - fastpathTV.DecMapInt32UintV(v, false, d) - case *map[int32]uint: - var v2 map[int32]uint - v2, changed = fastpathTV.DecMapInt32UintV(*v, true, d) - if changed { - *v = v2 - } - case map[int32]uint8: - fastpathTV.DecMapInt32Uint8V(v, false, d) - case *map[int32]uint8: - var v2 map[int32]uint8 - v2, changed = fastpathTV.DecMapInt32Uint8V(*v, true, d) - if changed { - *v = v2 - } - case map[int32]uint16: - fastpathTV.DecMapInt32Uint16V(v, false, d) - case *map[int32]uint16: - var v2 map[int32]uint16 - v2, changed = fastpathTV.DecMapInt32Uint16V(*v, true, d) - if changed { - *v = v2 - } - case map[int32]uint32: - fastpathTV.DecMapInt32Uint32V(v, false, d) - case *map[int32]uint32: - var v2 map[int32]uint32 - v2, changed = fastpathTV.DecMapInt32Uint32V(*v, true, d) - if changed { - *v = v2 - } - case map[int32]uint64: - fastpathTV.DecMapInt32Uint64V(v, false, d) - case *map[int32]uint64: - var v2 map[int32]uint64 - v2, changed = fastpathTV.DecMapInt32Uint64V(*v, true, d) - if changed { - *v = v2 - } - case map[int32]uintptr: - fastpathTV.DecMapInt32UintptrV(v, false, d) - case *map[int32]uintptr: - var v2 map[int32]uintptr - v2, changed = fastpathTV.DecMapInt32UintptrV(*v, true, d) - if changed { - *v = v2 - } - case map[int32]int: - fastpathTV.DecMapInt32IntV(v, false, d) - case *map[int32]int: - var v2 map[int32]int - v2, changed = fastpathTV.DecMapInt32IntV(*v, true, d) - if changed { - *v = v2 - } - case map[int32]int8: - fastpathTV.DecMapInt32Int8V(v, false, d) - case *map[int32]int8: - var v2 map[int32]int8 - v2, changed = fastpathTV.DecMapInt32Int8V(*v, true, d) - if changed { - *v = v2 - } - case map[int32]int16: - fastpathTV.DecMapInt32Int16V(v, false, d) - case *map[int32]int16: - var v2 map[int32]int16 - v2, changed = fastpathTV.DecMapInt32Int16V(*v, true, d) - if changed { - *v = v2 - } - case map[int32]int32: - fastpathTV.DecMapInt32Int32V(v, false, d) - case *map[int32]int32: - var v2 map[int32]int32 - v2, changed = fastpathTV.DecMapInt32Int32V(*v, true, d) - if changed { - *v = v2 - } - case 
map[int32]int64: - fastpathTV.DecMapInt32Int64V(v, false, d) - case *map[int32]int64: - var v2 map[int32]int64 - v2, changed = fastpathTV.DecMapInt32Int64V(*v, true, d) - if changed { - *v = v2 - } - case map[int32]float32: - fastpathTV.DecMapInt32Float32V(v, false, d) - case *map[int32]float32: - var v2 map[int32]float32 - v2, changed = fastpathTV.DecMapInt32Float32V(*v, true, d) - if changed { - *v = v2 - } - case map[int32]float64: - fastpathTV.DecMapInt32Float64V(v, false, d) - case *map[int32]float64: - var v2 map[int32]float64 - v2, changed = fastpathTV.DecMapInt32Float64V(*v, true, d) - if changed { - *v = v2 - } - case map[int32]bool: - fastpathTV.DecMapInt32BoolV(v, false, d) - case *map[int32]bool: - var v2 map[int32]bool - v2, changed = fastpathTV.DecMapInt32BoolV(*v, true, d) - if changed { - *v = v2 - } - case map[int64]interface{}: - fastpathTV.DecMapInt64IntfV(v, false, d) - case *map[int64]interface{}: - var v2 map[int64]interface{} - v2, changed = fastpathTV.DecMapInt64IntfV(*v, true, d) - if changed { - *v = v2 - } - case map[int64]string: - fastpathTV.DecMapInt64StringV(v, false, d) - case *map[int64]string: - var v2 map[int64]string - v2, changed = fastpathTV.DecMapInt64StringV(*v, true, d) - if changed { - *v = v2 - } - case map[int64]uint: - fastpathTV.DecMapInt64UintV(v, false, d) - case *map[int64]uint: - var v2 map[int64]uint - v2, changed = fastpathTV.DecMapInt64UintV(*v, true, d) - if changed { - *v = v2 - } - case map[int64]uint8: - fastpathTV.DecMapInt64Uint8V(v, false, d) - case *map[int64]uint8: - var v2 map[int64]uint8 - v2, changed = fastpathTV.DecMapInt64Uint8V(*v, true, d) - if changed { - *v = v2 - } - case map[int64]uint16: - fastpathTV.DecMapInt64Uint16V(v, false, d) - case *map[int64]uint16: - var v2 map[int64]uint16 - v2, changed = fastpathTV.DecMapInt64Uint16V(*v, true, d) - if changed { - *v = v2 - } - case map[int64]uint32: - fastpathTV.DecMapInt64Uint32V(v, false, d) - case *map[int64]uint32: - var v2 map[int64]uint32 - v2, changed = fastpathTV.DecMapInt64Uint32V(*v, true, d) - if changed { - *v = v2 - } - case map[int64]uint64: - fastpathTV.DecMapInt64Uint64V(v, false, d) - case *map[int64]uint64: - var v2 map[int64]uint64 - v2, changed = fastpathTV.DecMapInt64Uint64V(*v, true, d) - if changed { - *v = v2 - } - case map[int64]uintptr: - fastpathTV.DecMapInt64UintptrV(v, false, d) - case *map[int64]uintptr: - var v2 map[int64]uintptr - v2, changed = fastpathTV.DecMapInt64UintptrV(*v, true, d) - if changed { - *v = v2 - } - case map[int64]int: - fastpathTV.DecMapInt64IntV(v, false, d) - case *map[int64]int: - var v2 map[int64]int - v2, changed = fastpathTV.DecMapInt64IntV(*v, true, d) - if changed { - *v = v2 - } - case map[int64]int8: - fastpathTV.DecMapInt64Int8V(v, false, d) - case *map[int64]int8: - var v2 map[int64]int8 - v2, changed = fastpathTV.DecMapInt64Int8V(*v, true, d) - if changed { - *v = v2 - } - case map[int64]int16: - fastpathTV.DecMapInt64Int16V(v, false, d) - case *map[int64]int16: - var v2 map[int64]int16 - v2, changed = fastpathTV.DecMapInt64Int16V(*v, true, d) - if changed { - *v = v2 - } - case map[int64]int32: - fastpathTV.DecMapInt64Int32V(v, false, d) - case *map[int64]int32: - var v2 map[int64]int32 - v2, changed = fastpathTV.DecMapInt64Int32V(*v, true, d) - if changed { - *v = v2 - } - case map[int64]int64: - fastpathTV.DecMapInt64Int64V(v, false, d) - case *map[int64]int64: - var v2 map[int64]int64 - v2, changed = fastpathTV.DecMapInt64Int64V(*v, true, d) - if changed { - *v = v2 - } - case map[int64]float32: - 
fastpathTV.DecMapInt64Float32V(v, false, d) - case *map[int64]float32: - var v2 map[int64]float32 - v2, changed = fastpathTV.DecMapInt64Float32V(*v, true, d) - if changed { - *v = v2 - } - case map[int64]float64: - fastpathTV.DecMapInt64Float64V(v, false, d) - case *map[int64]float64: - var v2 map[int64]float64 - v2, changed = fastpathTV.DecMapInt64Float64V(*v, true, d) - if changed { - *v = v2 - } - case map[int64]bool: - fastpathTV.DecMapInt64BoolV(v, false, d) - case *map[int64]bool: - var v2 map[int64]bool - v2, changed = fastpathTV.DecMapInt64BoolV(*v, true, d) - if changed { - *v = v2 - } - case map[bool]interface{}: - fastpathTV.DecMapBoolIntfV(v, false, d) - case *map[bool]interface{}: - var v2 map[bool]interface{} - v2, changed = fastpathTV.DecMapBoolIntfV(*v, true, d) - if changed { - *v = v2 - } - case map[bool]string: - fastpathTV.DecMapBoolStringV(v, false, d) - case *map[bool]string: - var v2 map[bool]string - v2, changed = fastpathTV.DecMapBoolStringV(*v, true, d) - if changed { - *v = v2 - } - case map[bool]uint: - fastpathTV.DecMapBoolUintV(v, false, d) - case *map[bool]uint: - var v2 map[bool]uint - v2, changed = fastpathTV.DecMapBoolUintV(*v, true, d) - if changed { - *v = v2 - } - case map[bool]uint8: - fastpathTV.DecMapBoolUint8V(v, false, d) - case *map[bool]uint8: - var v2 map[bool]uint8 - v2, changed = fastpathTV.DecMapBoolUint8V(*v, true, d) - if changed { - *v = v2 - } - case map[bool]uint16: - fastpathTV.DecMapBoolUint16V(v, false, d) - case *map[bool]uint16: - var v2 map[bool]uint16 - v2, changed = fastpathTV.DecMapBoolUint16V(*v, true, d) - if changed { - *v = v2 - } - case map[bool]uint32: - fastpathTV.DecMapBoolUint32V(v, false, d) - case *map[bool]uint32: - var v2 map[bool]uint32 - v2, changed = fastpathTV.DecMapBoolUint32V(*v, true, d) - if changed { - *v = v2 - } - case map[bool]uint64: - fastpathTV.DecMapBoolUint64V(v, false, d) - case *map[bool]uint64: - var v2 map[bool]uint64 - v2, changed = fastpathTV.DecMapBoolUint64V(*v, true, d) - if changed { - *v = v2 - } - case map[bool]uintptr: - fastpathTV.DecMapBoolUintptrV(v, false, d) - case *map[bool]uintptr: - var v2 map[bool]uintptr - v2, changed = fastpathTV.DecMapBoolUintptrV(*v, true, d) - if changed { - *v = v2 - } - case map[bool]int: - fastpathTV.DecMapBoolIntV(v, false, d) - case *map[bool]int: - var v2 map[bool]int - v2, changed = fastpathTV.DecMapBoolIntV(*v, true, d) - if changed { - *v = v2 - } - case map[bool]int8: - fastpathTV.DecMapBoolInt8V(v, false, d) - case *map[bool]int8: - var v2 map[bool]int8 - v2, changed = fastpathTV.DecMapBoolInt8V(*v, true, d) - if changed { - *v = v2 - } - case map[bool]int16: - fastpathTV.DecMapBoolInt16V(v, false, d) - case *map[bool]int16: - var v2 map[bool]int16 - v2, changed = fastpathTV.DecMapBoolInt16V(*v, true, d) - if changed { - *v = v2 - } - case map[bool]int32: - fastpathTV.DecMapBoolInt32V(v, false, d) - case *map[bool]int32: - var v2 map[bool]int32 - v2, changed = fastpathTV.DecMapBoolInt32V(*v, true, d) - if changed { - *v = v2 - } - case map[bool]int64: - fastpathTV.DecMapBoolInt64V(v, false, d) - case *map[bool]int64: - var v2 map[bool]int64 - v2, changed = fastpathTV.DecMapBoolInt64V(*v, true, d) - if changed { - *v = v2 - } - case map[bool]float32: - fastpathTV.DecMapBoolFloat32V(v, false, d) - case *map[bool]float32: - var v2 map[bool]float32 - v2, changed = fastpathTV.DecMapBoolFloat32V(*v, true, d) - if changed { - *v = v2 - } - case map[bool]float64: - fastpathTV.DecMapBoolFloat64V(v, false, d) - case *map[bool]float64: - var v2 
map[bool]float64 - v2, changed = fastpathTV.DecMapBoolFloat64V(*v, true, d) - if changed { - *v = v2 - } - case map[bool]bool: - fastpathTV.DecMapBoolBoolV(v, false, d) - case *map[bool]bool: - var v2 map[bool]bool - v2, changed = fastpathTV.DecMapBoolBoolV(*v, true, d) - if changed { - *v = v2 - } - default: - _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 - return false - } - return true -} - -func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { - switch v := iv.(type) { - - case *[]interface{}: - *v = nil - case *[]string: - *v = nil - case *[]float32: - *v = nil - case *[]float64: - *v = nil - case *[]uint: - *v = nil - case *[]uint8: - *v = nil - case *[]uint16: - *v = nil - case *[]uint32: - *v = nil - case *[]uint64: - *v = nil - case *[]uintptr: - *v = nil - case *[]int: - *v = nil - case *[]int8: - *v = nil - case *[]int16: - *v = nil - case *[]int32: - *v = nil - case *[]int64: - *v = nil - case *[]bool: - *v = nil - - case *map[interface{}]interface{}: - *v = nil - case *map[interface{}]string: - *v = nil - case *map[interface{}]uint: - *v = nil - case *map[interface{}]uint8: - *v = nil - case *map[interface{}]uint16: - *v = nil - case *map[interface{}]uint32: - *v = nil - case *map[interface{}]uint64: - *v = nil - case *map[interface{}]uintptr: - *v = nil - case *map[interface{}]int: - *v = nil - case *map[interface{}]int8: - *v = nil - case *map[interface{}]int16: - *v = nil - case *map[interface{}]int32: - *v = nil - case *map[interface{}]int64: - *v = nil - case *map[interface{}]float32: - *v = nil - case *map[interface{}]float64: - *v = nil - case *map[interface{}]bool: - *v = nil - case *map[string]interface{}: - *v = nil - case *map[string]string: - *v = nil - case *map[string]uint: - *v = nil - case *map[string]uint8: - *v = nil - case *map[string]uint16: - *v = nil - case *map[string]uint32: - *v = nil - case *map[string]uint64: - *v = nil - case *map[string]uintptr: - *v = nil - case *map[string]int: - *v = nil - case *map[string]int8: - *v = nil - case *map[string]int16: - *v = nil - case *map[string]int32: - *v = nil - case *map[string]int64: - *v = nil - case *map[string]float32: - *v = nil - case *map[string]float64: - *v = nil - case *map[string]bool: - *v = nil - case *map[float32]interface{}: - *v = nil - case *map[float32]string: - *v = nil - case *map[float32]uint: - *v = nil - case *map[float32]uint8: - *v = nil - case *map[float32]uint16: - *v = nil - case *map[float32]uint32: - *v = nil - case *map[float32]uint64: - *v = nil - case *map[float32]uintptr: - *v = nil - case *map[float32]int: - *v = nil - case *map[float32]int8: - *v = nil - case *map[float32]int16: - *v = nil - case *map[float32]int32: - *v = nil - case *map[float32]int64: - *v = nil - case *map[float32]float32: - *v = nil - case *map[float32]float64: - *v = nil - case *map[float32]bool: - *v = nil - case *map[float64]interface{}: - *v = nil - case *map[float64]string: - *v = nil - case *map[float64]uint: - *v = nil - case *map[float64]uint8: - *v = nil - case *map[float64]uint16: - *v = nil - case *map[float64]uint32: - *v = nil - case *map[float64]uint64: - *v = nil - case *map[float64]uintptr: - *v = nil - case *map[float64]int: - *v = nil - case *map[float64]int8: - *v = nil - case *map[float64]int16: - *v = nil - case *map[float64]int32: - *v = nil - case *map[float64]int64: - *v = nil - case *map[float64]float32: - *v = nil - case *map[float64]float64: - *v = nil - case *map[float64]bool: - *v = nil - case *map[uint]interface{}: - *v = nil - case 
*map[uint]string: - *v = nil - case *map[uint]uint: - *v = nil - case *map[uint]uint8: - *v = nil - case *map[uint]uint16: - *v = nil - case *map[uint]uint32: - *v = nil - case *map[uint]uint64: - *v = nil - case *map[uint]uintptr: - *v = nil - case *map[uint]int: - *v = nil - case *map[uint]int8: - *v = nil - case *map[uint]int16: - *v = nil - case *map[uint]int32: - *v = nil - case *map[uint]int64: - *v = nil - case *map[uint]float32: - *v = nil - case *map[uint]float64: - *v = nil - case *map[uint]bool: - *v = nil - case *map[uint8]interface{}: - *v = nil - case *map[uint8]string: - *v = nil - case *map[uint8]uint: - *v = nil - case *map[uint8]uint8: - *v = nil - case *map[uint8]uint16: - *v = nil - case *map[uint8]uint32: - *v = nil - case *map[uint8]uint64: - *v = nil - case *map[uint8]uintptr: - *v = nil - case *map[uint8]int: - *v = nil - case *map[uint8]int8: - *v = nil - case *map[uint8]int16: - *v = nil - case *map[uint8]int32: - *v = nil - case *map[uint8]int64: - *v = nil - case *map[uint8]float32: - *v = nil - case *map[uint8]float64: - *v = nil - case *map[uint8]bool: - *v = nil - case *map[uint16]interface{}: - *v = nil - case *map[uint16]string: - *v = nil - case *map[uint16]uint: - *v = nil - case *map[uint16]uint8: - *v = nil - case *map[uint16]uint16: - *v = nil - case *map[uint16]uint32: - *v = nil - case *map[uint16]uint64: - *v = nil - case *map[uint16]uintptr: - *v = nil - case *map[uint16]int: - *v = nil - case *map[uint16]int8: - *v = nil - case *map[uint16]int16: - *v = nil - case *map[uint16]int32: - *v = nil - case *map[uint16]int64: - *v = nil - case *map[uint16]float32: - *v = nil - case *map[uint16]float64: - *v = nil - case *map[uint16]bool: - *v = nil - case *map[uint32]interface{}: - *v = nil - case *map[uint32]string: - *v = nil - case *map[uint32]uint: - *v = nil - case *map[uint32]uint8: - *v = nil - case *map[uint32]uint16: - *v = nil - case *map[uint32]uint32: - *v = nil - case *map[uint32]uint64: - *v = nil - case *map[uint32]uintptr: - *v = nil - case *map[uint32]int: - *v = nil - case *map[uint32]int8: - *v = nil - case *map[uint32]int16: - *v = nil - case *map[uint32]int32: - *v = nil - case *map[uint32]int64: - *v = nil - case *map[uint32]float32: - *v = nil - case *map[uint32]float64: - *v = nil - case *map[uint32]bool: - *v = nil - case *map[uint64]interface{}: - *v = nil - case *map[uint64]string: - *v = nil - case *map[uint64]uint: - *v = nil - case *map[uint64]uint8: - *v = nil - case *map[uint64]uint16: - *v = nil - case *map[uint64]uint32: - *v = nil - case *map[uint64]uint64: - *v = nil - case *map[uint64]uintptr: - *v = nil - case *map[uint64]int: - *v = nil - case *map[uint64]int8: - *v = nil - case *map[uint64]int16: - *v = nil - case *map[uint64]int32: - *v = nil - case *map[uint64]int64: - *v = nil - case *map[uint64]float32: - *v = nil - case *map[uint64]float64: - *v = nil - case *map[uint64]bool: - *v = nil - case *map[uintptr]interface{}: - *v = nil - case *map[uintptr]string: - *v = nil - case *map[uintptr]uint: - *v = nil - case *map[uintptr]uint8: - *v = nil - case *map[uintptr]uint16: - *v = nil - case *map[uintptr]uint32: - *v = nil - case *map[uintptr]uint64: - *v = nil - case *map[uintptr]uintptr: - *v = nil - case *map[uintptr]int: - *v = nil - case *map[uintptr]int8: - *v = nil - case *map[uintptr]int16: - *v = nil - case *map[uintptr]int32: - *v = nil - case *map[uintptr]int64: - *v = nil - case *map[uintptr]float32: - *v = nil - case *map[uintptr]float64: - *v = nil - case *map[uintptr]bool: - *v = nil - case 
*map[int]interface{}: - *v = nil - case *map[int]string: - *v = nil - case *map[int]uint: - *v = nil - case *map[int]uint8: - *v = nil - case *map[int]uint16: - *v = nil - case *map[int]uint32: - *v = nil - case *map[int]uint64: - *v = nil - case *map[int]uintptr: - *v = nil - case *map[int]int: - *v = nil - case *map[int]int8: - *v = nil - case *map[int]int16: - *v = nil - case *map[int]int32: - *v = nil - case *map[int]int64: - *v = nil - case *map[int]float32: - *v = nil - case *map[int]float64: - *v = nil - case *map[int]bool: - *v = nil - case *map[int8]interface{}: - *v = nil - case *map[int8]string: - *v = nil - case *map[int8]uint: - *v = nil - case *map[int8]uint8: - *v = nil - case *map[int8]uint16: - *v = nil - case *map[int8]uint32: - *v = nil - case *map[int8]uint64: - *v = nil - case *map[int8]uintptr: - *v = nil - case *map[int8]int: - *v = nil - case *map[int8]int8: - *v = nil - case *map[int8]int16: - *v = nil - case *map[int8]int32: - *v = nil - case *map[int8]int64: - *v = nil - case *map[int8]float32: - *v = nil - case *map[int8]float64: - *v = nil - case *map[int8]bool: - *v = nil - case *map[int16]interface{}: - *v = nil - case *map[int16]string: - *v = nil - case *map[int16]uint: - *v = nil - case *map[int16]uint8: - *v = nil - case *map[int16]uint16: - *v = nil - case *map[int16]uint32: - *v = nil - case *map[int16]uint64: - *v = nil - case *map[int16]uintptr: - *v = nil - case *map[int16]int: - *v = nil - case *map[int16]int8: - *v = nil - case *map[int16]int16: - *v = nil - case *map[int16]int32: - *v = nil - case *map[int16]int64: - *v = nil - case *map[int16]float32: - *v = nil - case *map[int16]float64: - *v = nil - case *map[int16]bool: - *v = nil - case *map[int32]interface{}: - *v = nil - case *map[int32]string: - *v = nil - case *map[int32]uint: - *v = nil - case *map[int32]uint8: - *v = nil - case *map[int32]uint16: - *v = nil - case *map[int32]uint32: - *v = nil - case *map[int32]uint64: - *v = nil - case *map[int32]uintptr: - *v = nil - case *map[int32]int: - *v = nil - case *map[int32]int8: - *v = nil - case *map[int32]int16: - *v = nil - case *map[int32]int32: - *v = nil - case *map[int32]int64: - *v = nil - case *map[int32]float32: - *v = nil - case *map[int32]float64: - *v = nil - case *map[int32]bool: - *v = nil - case *map[int64]interface{}: - *v = nil - case *map[int64]string: - *v = nil - case *map[int64]uint: - *v = nil - case *map[int64]uint8: - *v = nil - case *map[int64]uint16: - *v = nil - case *map[int64]uint32: - *v = nil - case *map[int64]uint64: - *v = nil - case *map[int64]uintptr: - *v = nil - case *map[int64]int: - *v = nil - case *map[int64]int8: - *v = nil - case *map[int64]int16: - *v = nil - case *map[int64]int32: - *v = nil - case *map[int64]int64: - *v = nil - case *map[int64]float32: - *v = nil - case *map[int64]float64: - *v = nil - case *map[int64]bool: - *v = nil - case *map[bool]interface{}: - *v = nil - case *map[bool]string: - *v = nil - case *map[bool]uint: - *v = nil - case *map[bool]uint8: - *v = nil - case *map[bool]uint16: - *v = nil - case *map[bool]uint32: - *v = nil - case *map[bool]uint64: - *v = nil - case *map[bool]uintptr: - *v = nil - case *map[bool]int: - *v = nil - case *map[bool]int8: - *v = nil - case *map[bool]int16: - *v = nil - case *map[bool]int32: - *v = nil - case *map[bool]int64: - *v = nil - case *map[bool]float32: - *v = nil - case *map[bool]float64: - *v = nil - case *map[bool]bool: - *v = nil - default: - _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 - return false 
- } - return true -} - -// -- -- fast path functions - -func (d *Decoder) fastpathDecSliceIntfR(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*[]interface{}) - v, changed := fastpathTV.DecSliceIntfV(*vp, !array, d) - if changed { - *vp = v - } - } else { - v := rv2i(rv).([]interface{}) - v2, changed := fastpathTV.DecSliceIntfV(v, !array, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - } -} -func (f fastpathT) DecSliceIntfX(vp *[]interface{}, d *Decoder) { - v, changed := f.DecSliceIntfV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceIntfV(v []interface{}, canChange bool, d *Decoder) (_ []interface{}, changed bool) { - dd := d.d - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []interface{}{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - d.depthIncr() - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]interface{}, uint(xlen)) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 && canChange { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) - } else { - xlen = 8 - } - v = make([]interface{}, uint(xlen)) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, nil) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else if dd.TryDecodeAsNil() { - v[uint(j)] = nil - } else { - d.decode(&v[uint(j)]) - } - } - if canChange { - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = make([]interface{}, 0) - changed = true - } - } - slh.End() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecSliceStringR(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*[]string) - v, changed := fastpathTV.DecSliceStringV(*vp, !array, d) - if changed { - *vp = v - } - } else { - v := rv2i(rv).([]string) - v2, changed := fastpathTV.DecSliceStringV(v, !array, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - } -} -func (f fastpathT) DecSliceStringX(vp *[]string, d *Decoder) { - v, changed := f.DecSliceStringV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceStringV(v []string, canChange bool, d *Decoder) (_ []string, changed bool) { - dd := d.d - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []string{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - d.depthIncr() - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]string, uint(xlen)) - } - changed = true 
- } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 && canChange { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) - } else { - xlen = 8 - } - v = make([]string, uint(xlen)) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, "") - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else if dd.TryDecodeAsNil() { - v[uint(j)] = "" - } else { - v[uint(j)] = dd.DecodeString() - } - } - if canChange { - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = make([]string, 0) - changed = true - } - } - slh.End() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecSliceFloat32R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*[]float32) - v, changed := fastpathTV.DecSliceFloat32V(*vp, !array, d) - if changed { - *vp = v - } - } else { - v := rv2i(rv).([]float32) - v2, changed := fastpathTV.DecSliceFloat32V(v, !array, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - } -} -func (f fastpathT) DecSliceFloat32X(vp *[]float32, d *Decoder) { - v, changed := f.DecSliceFloat32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceFloat32V(v []float32, canChange bool, d *Decoder) (_ []float32, changed bool) { - dd := d.d - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []float32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - d.depthIncr() - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]float32, uint(xlen)) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 && canChange { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) - } else { - xlen = 8 - } - v = make([]float32, uint(xlen)) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else if dd.TryDecodeAsNil() { - v[uint(j)] = 0 - } else { - v[uint(j)] = float32(chkOvf.Float32V(dd.DecodeFloat64())) - } - } - if canChange { - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = make([]float32, 0) - changed = true - } - } - slh.End() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecSliceFloat64R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*[]float64) - v, changed := fastpathTV.DecSliceFloat64V(*vp, !array, d) - if changed { - *vp = v - } - } else { - v := 
rv2i(rv).([]float64) - v2, changed := fastpathTV.DecSliceFloat64V(v, !array, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - } -} -func (f fastpathT) DecSliceFloat64X(vp *[]float64, d *Decoder) { - v, changed := f.DecSliceFloat64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceFloat64V(v []float64, canChange bool, d *Decoder) (_ []float64, changed bool) { - dd := d.d - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []float64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - d.depthIncr() - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]float64, uint(xlen)) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 && canChange { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - } else { - xlen = 8 - } - v = make([]float64, uint(xlen)) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else if dd.TryDecodeAsNil() { - v[uint(j)] = 0 - } else { - v[uint(j)] = dd.DecodeFloat64() - } - } - if canChange { - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = make([]float64, 0) - changed = true - } - } - slh.End() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecSliceUintR(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*[]uint) - v, changed := fastpathTV.DecSliceUintV(*vp, !array, d) - if changed { - *vp = v - } - } else { - v := rv2i(rv).([]uint) - v2, changed := fastpathTV.DecSliceUintV(v, !array, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - } -} -func (f fastpathT) DecSliceUintX(vp *[]uint, d *Decoder) { - v, changed := f.DecSliceUintV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUintV(v []uint, canChange bool, d *Decoder) (_ []uint, changed bool) { - dd := d.d - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uint{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - d.depthIncr() - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]uint, uint(xlen)) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 && canChange { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - } else { - xlen = 8 - } - v = make([]uint, uint(xlen)) - changed = true - } - // if 
indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else if dd.TryDecodeAsNil() { - v[uint(j)] = 0 - } else { - v[uint(j)] = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - } - } - if canChange { - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = make([]uint, 0) - changed = true - } - } - slh.End() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecSliceUint8R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*[]uint8) - v, changed := fastpathTV.DecSliceUint8V(*vp, !array, d) - if changed { - *vp = v - } - } else { - v := rv2i(rv).([]uint8) - v2, changed := fastpathTV.DecSliceUint8V(v, !array, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - } -} -func (f fastpathT) DecSliceUint8X(vp *[]uint8, d *Decoder) { - v, changed := f.DecSliceUint8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) { - dd := d.d - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uint8{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - d.depthIncr() - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]uint8, uint(xlen)) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 && canChange { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) - } else { - xlen = 8 - } - v = make([]uint8, uint(xlen)) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else if dd.TryDecodeAsNil() { - v[uint(j)] = 0 - } else { - v[uint(j)] = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - } - } - if canChange { - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = make([]uint8, 0) - changed = true - } - } - slh.End() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecSliceUint16R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*[]uint16) - v, changed := fastpathTV.DecSliceUint16V(*vp, !array, d) - if changed { - *vp = v - } - } else { - v := rv2i(rv).([]uint16) - v2, changed := fastpathTV.DecSliceUint16V(v, !array, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - } -} -func (f fastpathT) DecSliceUint16X(vp *[]uint16, d *Decoder) { - v, changed := f.DecSliceUint16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUint16V(v []uint16, canChange bool, 
d *Decoder) (_ []uint16, changed bool) { - dd := d.d - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uint16{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - d.depthIncr() - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]uint16, uint(xlen)) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 && canChange { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) - } else { - xlen = 8 - } - v = make([]uint16, uint(xlen)) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else if dd.TryDecodeAsNil() { - v[uint(j)] = 0 - } else { - v[uint(j)] = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - } - } - if canChange { - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = make([]uint16, 0) - changed = true - } - } - slh.End() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecSliceUint32R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*[]uint32) - v, changed := fastpathTV.DecSliceUint32V(*vp, !array, d) - if changed { - *vp = v - } - } else { - v := rv2i(rv).([]uint32) - v2, changed := fastpathTV.DecSliceUint32V(v, !array, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - } -} -func (f fastpathT) DecSliceUint32X(vp *[]uint32, d *Decoder) { - v, changed := f.DecSliceUint32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUint32V(v []uint32, canChange bool, d *Decoder) (_ []uint32, changed bool) { - dd := d.d - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uint32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - d.depthIncr() - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]uint32, uint(xlen)) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 && canChange { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) - } else { - xlen = 8 - } - v = make([]uint32, uint(xlen)) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else if dd.TryDecodeAsNil() { - v[uint(j)] = 0 - } else { - 
v[uint(j)] = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - } - } - if canChange { - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = make([]uint32, 0) - changed = true - } - } - slh.End() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecSliceUint64R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*[]uint64) - v, changed := fastpathTV.DecSliceUint64V(*vp, !array, d) - if changed { - *vp = v - } - } else { - v := rv2i(rv).([]uint64) - v2, changed := fastpathTV.DecSliceUint64V(v, !array, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - } -} -func (f fastpathT) DecSliceUint64X(vp *[]uint64, d *Decoder) { - v, changed := f.DecSliceUint64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUint64V(v []uint64, canChange bool, d *Decoder) (_ []uint64, changed bool) { - dd := d.d - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uint64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - d.depthIncr() - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]uint64, uint(xlen)) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 && canChange { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - } else { - xlen = 8 - } - v = make([]uint64, uint(xlen)) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else if dd.TryDecodeAsNil() { - v[uint(j)] = 0 - } else { - v[uint(j)] = dd.DecodeUint64() - } - } - if canChange { - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = make([]uint64, 0) - changed = true - } - } - slh.End() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecSliceUintptrR(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*[]uintptr) - v, changed := fastpathTV.DecSliceUintptrV(*vp, !array, d) - if changed { - *vp = v - } - } else { - v := rv2i(rv).([]uintptr) - v2, changed := fastpathTV.DecSliceUintptrV(v, !array, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - } -} -func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, d *Decoder) { - v, changed := f.DecSliceUintptrV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUintptrV(v []uintptr, canChange bool, d *Decoder) (_ []uintptr, changed bool) { - dd := d.d - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uintptr{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - d.depthIncr() - hasLen := containerLenS > 0 - var xlen int - if hasLen && 
canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]uintptr, uint(xlen)) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 && canChange { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - } else { - xlen = 8 - } - v = make([]uintptr, uint(xlen)) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else if dd.TryDecodeAsNil() { - v[uint(j)] = 0 - } else { - v[uint(j)] = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - } - } - if canChange { - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = make([]uintptr, 0) - changed = true - } - } - slh.End() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecSliceIntR(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*[]int) - v, changed := fastpathTV.DecSliceIntV(*vp, !array, d) - if changed { - *vp = v - } - } else { - v := rv2i(rv).([]int) - v2, changed := fastpathTV.DecSliceIntV(v, !array, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - } -} -func (f fastpathT) DecSliceIntX(vp *[]int, d *Decoder) { - v, changed := f.DecSliceIntV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceIntV(v []int, canChange bool, d *Decoder) (_ []int, changed bool) { - dd := d.d - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - d.depthIncr() - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]int, uint(xlen)) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 && canChange { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - } else { - xlen = 8 - } - v = make([]int, uint(xlen)) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else if dd.TryDecodeAsNil() { - v[uint(j)] = 0 - } else { - v[uint(j)] = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) - } - } - if canChange { - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = make([]int, 0) - changed = true - } - } - slh.End() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecSliceInt8R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; 
!array && rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*[]int8) - v, changed := fastpathTV.DecSliceInt8V(*vp, !array, d) - if changed { - *vp = v - } - } else { - v := rv2i(rv).([]int8) - v2, changed := fastpathTV.DecSliceInt8V(v, !array, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - } -} -func (f fastpathT) DecSliceInt8X(vp *[]int8, d *Decoder) { - v, changed := f.DecSliceInt8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceInt8V(v []int8, canChange bool, d *Decoder) (_ []int8, changed bool) { - dd := d.d - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int8{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - d.depthIncr() - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]int8, uint(xlen)) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 && canChange { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) - } else { - xlen = 8 - } - v = make([]int8, uint(xlen)) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else if dd.TryDecodeAsNil() { - v[uint(j)] = 0 - } else { - v[uint(j)] = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) - } - } - if canChange { - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = make([]int8, 0) - changed = true - } - } - slh.End() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecSliceInt16R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*[]int16) - v, changed := fastpathTV.DecSliceInt16V(*vp, !array, d) - if changed { - *vp = v - } - } else { - v := rv2i(rv).([]int16) - v2, changed := fastpathTV.DecSliceInt16V(v, !array, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - } -} -func (f fastpathT) DecSliceInt16X(vp *[]int16, d *Decoder) { - v, changed := f.DecSliceInt16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceInt16V(v []int16, canChange bool, d *Decoder) (_ []int16, changed bool) { - dd := d.d - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int16{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - d.depthIncr() - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]int16, uint(xlen)) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 && canChange 
{ - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) - } else { - xlen = 8 - } - v = make([]int16, uint(xlen)) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else if dd.TryDecodeAsNil() { - v[uint(j)] = 0 - } else { - v[uint(j)] = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - } - } - if canChange { - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = make([]int16, 0) - changed = true - } - } - slh.End() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecSliceInt32R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*[]int32) - v, changed := fastpathTV.DecSliceInt32V(*vp, !array, d) - if changed { - *vp = v - } - } else { - v := rv2i(rv).([]int32) - v2, changed := fastpathTV.DecSliceInt32V(v, !array, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - } -} -func (f fastpathT) DecSliceInt32X(vp *[]int32, d *Decoder) { - v, changed := f.DecSliceInt32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceInt32V(v []int32, canChange bool, d *Decoder) (_ []int32, changed bool) { - dd := d.d - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - d.depthIncr() - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]int32, uint(xlen)) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 && canChange { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) - } else { - xlen = 8 - } - v = make([]int32, uint(xlen)) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else if dd.TryDecodeAsNil() { - v[uint(j)] = 0 - } else { - v[uint(j)] = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - } - } - if canChange { - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = make([]int32, 0) - changed = true - } - } - slh.End() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecSliceInt64R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*[]int64) - v, changed := fastpathTV.DecSliceInt64V(*vp, !array, d) - if changed { - *vp = v - } - } else { - v := rv2i(rv).([]int64) - v2, changed := fastpathTV.DecSliceInt64V(v, !array, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - } -} -func (f fastpathT) DecSliceInt64X(vp *[]int64, d *Decoder) { - 
v, changed := f.DecSliceInt64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceInt64V(v []int64, canChange bool, d *Decoder) (_ []int64, changed bool) { - dd := d.d - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - d.depthIncr() - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]int64, uint(xlen)) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 && canChange { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) - } else { - xlen = 8 - } - v = make([]int64, uint(xlen)) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else if dd.TryDecodeAsNil() { - v[uint(j)] = 0 - } else { - v[uint(j)] = dd.DecodeInt64() - } - } - if canChange { - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = make([]int64, 0) - changed = true - } - } - slh.End() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecSliceBoolR(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*[]bool) - v, changed := fastpathTV.DecSliceBoolV(*vp, !array, d) - if changed { - *vp = v - } - } else { - v := rv2i(rv).([]bool) - v2, changed := fastpathTV.DecSliceBoolV(v, !array, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - } -} -func (f fastpathT) DecSliceBoolX(vp *[]bool, d *Decoder) { - v, changed := f.DecSliceBoolV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceBoolV(v []bool, canChange bool, d *Decoder) (_ []bool, changed bool) { - dd := d.d - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []bool{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - d.depthIncr() - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]bool, uint(xlen)) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 && canChange { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) - } else { - xlen = 8 - } - v = make([]bool, uint(xlen)) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, false) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - 
d.swallow() - } else if dd.TryDecodeAsNil() { - v[uint(j)] = false - } else { - v[uint(j)] = dd.DecodeBool() - } - } - if canChange { - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = make([]bool, 0) - changed = true - } - } - slh.End() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfIntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]interface{}) - v, changed := fastpathTV.DecMapIntfIntfV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), false, d) - } -} -func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, d *Decoder) { - v, changed := f.DecMapIntfIntfV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, canChange bool, - d *Decoder) (_ map[interface{}]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) - v = make(map[interface{}]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset - var mk interface{} - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfStringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]string) - v, changed := fastpathTV.DecMapIntfStringV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapIntfStringV(rv2i(rv).(map[interface{}]string), false, d) - } -} -func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, d *Decoder) { - v, changed := f.DecMapIntfStringV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, canChange bool, - d *Decoder) (_ map[interface{}]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) - v = make(map[interface{}]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk interface{} - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfUintR(f *codecFnInfo, 
rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]uint) - v, changed := fastpathTV.DecMapIntfUintV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapIntfUintV(rv2i(rv).(map[interface{}]uint), false, d) - } -} -func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, d *Decoder) { - v, changed := f.DecMapIntfUintV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, canChange bool, - d *Decoder) (_ map[interface{}]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk interface{} - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfUint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]uint8) - v, changed := fastpathTV.DecMapIntfUint8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), false, d) - } -} -func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, d *Decoder) { - v, changed := f.DecMapIntfUint8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, canChange bool, - d *Decoder) (_ map[interface{}]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[interface{}]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk interface{} - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfUint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]uint16) - v, changed := fastpathTV.DecMapIntfUint16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), false, d) - } -} -func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, d *Decoder) { - v, changed := f.DecMapIntfUint16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint16V(v 
map[interface{}]uint16, canChange bool, - d *Decoder) (_ map[interface{}]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[interface{}]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk interface{} - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfUint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]uint32) - v, changed := fastpathTV.DecMapIntfUint32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), false, d) - } -} -func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, d *Decoder) { - v, changed := f.DecMapIntfUint32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, canChange bool, - d *Decoder) (_ map[interface{}]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[interface{}]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk interface{} - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfUint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]uint64) - v, changed := fastpathTV.DecMapIntfUint64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), false, d) - } -} -func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, d *Decoder) { - v, changed := f.DecMapIntfUint64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, canChange bool, - d *Decoder) (_ map[interface{}]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk interface{} - var mv uint64 - hasLen := 
containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]uintptr) - v, changed := fastpathTV.DecMapIntfUintptrV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), false, d) - } -} -func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, d *Decoder) { - v, changed := f.DecMapIntfUintptrV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, canChange bool, - d *Decoder) (_ map[interface{}]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk interface{} - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfIntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]int) - v, changed := fastpathTV.DecMapIntfIntV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapIntfIntV(rv2i(rv).(map[interface{}]int), false, d) - } -} -func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, d *Decoder) { - v, changed := f.DecMapIntfIntV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, canChange bool, - d *Decoder) (_ map[interface{}]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk interface{} - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) - if v != nil { - v[mk] = mv - } - } - 
dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfInt8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]int8) - v, changed := fastpathTV.DecMapIntfInt8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapIntfInt8V(rv2i(rv).(map[interface{}]int8), false, d) - } -} -func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, d *Decoder) { - v, changed := f.DecMapIntfInt8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, canChange bool, - d *Decoder) (_ map[interface{}]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[interface{}]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk interface{} - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfInt16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]int16) - v, changed := fastpathTV.DecMapIntfInt16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapIntfInt16V(rv2i(rv).(map[interface{}]int16), false, d) - } -} -func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, d *Decoder) { - v, changed := f.DecMapIntfInt16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, canChange bool, - d *Decoder) (_ map[interface{}]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[interface{}]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk interface{} - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfInt32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]int32) - v, changed := fastpathTV.DecMapIntfInt32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapIntfInt32V(rv2i(rv).(map[interface{}]int32), false, d) - } -} -func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, d *Decoder) { - v, changed := f.DecMapIntfInt32V(*vp, 
true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, canChange bool, - d *Decoder) (_ map[interface{}]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[interface{}]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk interface{} - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfInt64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]int64) - v, changed := fastpathTV.DecMapIntfInt64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapIntfInt64V(rv2i(rv).(map[interface{}]int64), false, d) - } -} -func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, d *Decoder) { - v, changed := f.DecMapIntfInt64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, canChange bool, - d *Decoder) (_ map[interface{}]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk interface{} - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]float32) - v, changed := fastpathTV.DecMapIntfFloat32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), false, d) - } -} -func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, d *Decoder) { - v, changed := f.DecMapIntfFloat32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, canChange bool, - d *Decoder) (_ map[interface{}]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[interface{}]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk 
interface{} - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]float64) - v, changed := fastpathTV.DecMapIntfFloat64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), false, d) - } -} -func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, d *Decoder) { - v, changed := f.DecMapIntfFloat64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, canChange bool, - d *Decoder) (_ map[interface{}]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk interface{} - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapIntfBoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[interface{}]bool) - v, changed := fastpathTV.DecMapIntfBoolV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapIntfBoolV(rv2i(rv).(map[interface{}]bool), false, d) - } -} -func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, d *Decoder) { - v, changed := f.DecMapIntfBoolV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, canChange bool, - d *Decoder) (_ map[interface{}]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[interface{}]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk interface{} - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - 
} - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringIntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]interface{}) - v, changed := fastpathTV.DecMapStringIntfV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapStringIntfV(rv2i(rv).(map[string]interface{}), false, d) - } -} -func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, d *Decoder) { - v, changed := f.DecMapStringIntfV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, canChange bool, - d *Decoder) (_ map[string]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) - v = make(map[string]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset - var mk string - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringStringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]string) - v, changed := fastpathTV.DecMapStringStringV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapStringStringV(rv2i(rv).(map[string]string), false, d) - } -} -func (f fastpathT) DecMapStringStringX(vp *map[string]string, d *Decoder) { - v, changed := f.DecMapStringStringV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringStringV(v map[string]string, canChange bool, - d *Decoder) (_ map[string]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) - v = make(map[string]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk string - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringUintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]uint) - v, changed := fastpathTV.DecMapStringUintV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapStringUintV(rv2i(rv).(map[string]uint), false, d) - } -} -func (f fastpathT) DecMapStringUintX(vp *map[string]uint, d *Decoder) { - v, changed := f.DecMapStringUintV(*vp, true, d) - if changed { - *vp = v - } -} 
-func (_ fastpathT) DecMapStringUintV(v map[string]uint, canChange bool, - d *Decoder) (_ map[string]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk string - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringUint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]uint8) - v, changed := fastpathTV.DecMapStringUint8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapStringUint8V(rv2i(rv).(map[string]uint8), false, d) - } -} -func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, d *Decoder) { - v, changed := f.DecMapStringUint8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, canChange bool, - d *Decoder) (_ map[string]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[string]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk string - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringUint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]uint16) - v, changed := fastpathTV.DecMapStringUint16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapStringUint16V(rv2i(rv).(map[string]uint16), false, d) - } -} -func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, d *Decoder) { - v, changed := f.DecMapStringUint16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, canChange bool, - d *Decoder) (_ map[string]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[string]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk string - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep 
{ - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringUint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]uint32) - v, changed := fastpathTV.DecMapStringUint32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapStringUint32V(rv2i(rv).(map[string]uint32), false, d) - } -} -func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, d *Decoder) { - v, changed := f.DecMapStringUint32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, canChange bool, - d *Decoder) (_ map[string]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[string]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk string - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringUint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]uint64) - v, changed := fastpathTV.DecMapStringUint64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapStringUint64V(rv2i(rv).(map[string]uint64), false, d) - } -} -func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, d *Decoder) { - v, changed := f.DecMapStringUint64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, canChange bool, - d *Decoder) (_ map[string]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk string - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringUintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]uintptr) - v, changed := fastpathTV.DecMapStringUintptrV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapStringUintptrV(rv2i(rv).(map[string]uintptr), false, d) - } -} 
-func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, d *Decoder) { - v, changed := f.DecMapStringUintptrV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, canChange bool, - d *Decoder) (_ map[string]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk string - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringIntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]int) - v, changed := fastpathTV.DecMapStringIntV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapStringIntV(rv2i(rv).(map[string]int), false, d) - } -} -func (f fastpathT) DecMapStringIntX(vp *map[string]int, d *Decoder) { - v, changed := f.DecMapStringIntV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringIntV(v map[string]int, canChange bool, - d *Decoder) (_ map[string]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk string - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringInt8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]int8) - v, changed := fastpathTV.DecMapStringInt8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapStringInt8V(rv2i(rv).(map[string]int8), false, d) - } -} -func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, d *Decoder) { - v, changed := f.DecMapStringInt8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt8V(v map[string]int8, canChange bool, - d *Decoder) (_ map[string]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[string]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk string - var mv int8 - hasLen := containerLen > 0 - for j := 0; 
(hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringInt16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]int16) - v, changed := fastpathTV.DecMapStringInt16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapStringInt16V(rv2i(rv).(map[string]int16), false, d) - } -} -func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, d *Decoder) { - v, changed := f.DecMapStringInt16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt16V(v map[string]int16, canChange bool, - d *Decoder) (_ map[string]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[string]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk string - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringInt32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]int32) - v, changed := fastpathTV.DecMapStringInt32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapStringInt32V(rv2i(rv).(map[string]int32), false, d) - } -} -func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, d *Decoder) { - v, changed := f.DecMapStringInt32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt32V(v map[string]int32, canChange bool, - d *Decoder) (_ map[string]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[string]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk string - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringInt64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]int64) - v, changed := fastpathTV.DecMapStringInt64V(*vp, true, d) - 
if changed { - *vp = v - } - } else { - fastpathTV.DecMapStringInt64V(rv2i(rv).(map[string]int64), false, d) - } -} -func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, d *Decoder) { - v, changed := f.DecMapStringInt64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt64V(v map[string]int64, canChange bool, - d *Decoder) (_ map[string]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk string - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringFloat32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]float32) - v, changed := fastpathTV.DecMapStringFloat32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapStringFloat32V(rv2i(rv).(map[string]float32), false, d) - } -} -func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, d *Decoder) { - v, changed := f.DecMapStringFloat32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, canChange bool, - d *Decoder) (_ map[string]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[string]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk string - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringFloat64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]float64) - v, changed := fastpathTV.DecMapStringFloat64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapStringFloat64V(rv2i(rv).(map[string]float64), false, d) - } -} -func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, d *Decoder) { - v, changed := f.DecMapStringFloat64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, canChange bool, - d *Decoder) (_ map[string]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]float64, xlen) - changed = true - } - if 
containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk string - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapStringBoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[string]bool) - v, changed := fastpathTV.DecMapStringBoolV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapStringBoolV(rv2i(rv).(map[string]bool), false, d) - } -} -func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, d *Decoder) { - v, changed := f.DecMapStringBoolV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringBoolV(v map[string]bool, canChange bool, - d *Decoder) (_ map[string]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[string]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk string - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeString() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]interface{}) - v, changed := fastpathTV.DecMapFloat32IntfV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), false, d) - } -} -func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, d *Decoder) { - v, changed := f.DecMapFloat32IntfV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, canChange bool, - d *Decoder) (_ map[float32]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[float32]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset - var mk float32 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - 
dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]string) - v, changed := fastpathTV.DecMapFloat32StringV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat32StringV(rv2i(rv).(map[float32]string), false, d) - } -} -func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, d *Decoder) { - v, changed := f.DecMapFloat32StringV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, canChange bool, - d *Decoder) (_ map[float32]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[float32]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float32 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]uint) - v, changed := fastpathTV.DecMapFloat32UintV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat32UintV(rv2i(rv).(map[float32]uint), false, d) - } -} -func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, d *Decoder) { - v, changed := f.DecMapFloat32UintV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, canChange bool, - d *Decoder) (_ map[float32]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float32 - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]uint8) - v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), false, d) - } -} -func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, d *Decoder) { - v, changed := f.DecMapFloat32Uint8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Uint8V(v 
map[float32]uint8, canChange bool, - d *Decoder) (_ map[float32]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[float32]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float32 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]uint16) - v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), false, d) - } -} -func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, d *Decoder) { - v, changed := f.DecMapFloat32Uint16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, canChange bool, - d *Decoder) (_ map[float32]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[float32]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float32 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]uint32) - v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), false, d) - } -} -func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, d *Decoder) { - v, changed := f.DecMapFloat32Uint32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, canChange bool, - d *Decoder) (_ map[float32]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[float32]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float32 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - 
dd.ReadMapElemKey() - } - mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]uint64) - v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), false, d) - } -} -func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, d *Decoder) { - v, changed := f.DecMapFloat32Uint64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, canChange bool, - d *Decoder) (_ map[float32]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float32 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]uintptr) - v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), false, d) - } -} -func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, d *Decoder) { - v, changed := f.DecMapFloat32UintptrV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, canChange bool, - d *Decoder) (_ map[float32]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float32 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]int) - v, 
changed := fastpathTV.DecMapFloat32IntV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat32IntV(rv2i(rv).(map[float32]int), false, d) - } -} -func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, d *Decoder) { - v, changed := f.DecMapFloat32IntV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, canChange bool, - d *Decoder) (_ map[float32]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float32 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]int8) - v, changed := fastpathTV.DecMapFloat32Int8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat32Int8V(rv2i(rv).(map[float32]int8), false, d) - } -} -func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, d *Decoder) { - v, changed := f.DecMapFloat32Int8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, canChange bool, - d *Decoder) (_ map[float32]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[float32]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float32 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]int16) - v, changed := fastpathTV.DecMapFloat32Int16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat32Int16V(rv2i(rv).(map[float32]int16), false, d) - } -} -func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, d *Decoder) { - v, changed := f.DecMapFloat32Int16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, canChange bool, - d *Decoder) (_ map[float32]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, 
d.h.MaxInitLen, 6) - v = make(map[float32]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float32 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]int32) - v, changed := fastpathTV.DecMapFloat32Int32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat32Int32V(rv2i(rv).(map[float32]int32), false, d) - } -} -func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, d *Decoder) { - v, changed := f.DecMapFloat32Int32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, canChange bool, - d *Decoder) (_ map[float32]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[float32]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float32 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]int64) - v, changed := fastpathTV.DecMapFloat32Int64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat32Int64V(rv2i(rv).(map[float32]int64), false, d) - } -} -func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, d *Decoder) { - v, changed := f.DecMapFloat32Int64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, canChange bool, - d *Decoder) (_ map[float32]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float32 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = 
dd.DecodeInt64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]float32) - v, changed := fastpathTV.DecMapFloat32Float32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat32Float32V(rv2i(rv).(map[float32]float32), false, d) - } -} -func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, d *Decoder) { - v, changed := f.DecMapFloat32Float32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, canChange bool, - d *Decoder) (_ map[float32]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[float32]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float32 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]float64) - v, changed := fastpathTV.DecMapFloat32Float64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat32Float64V(rv2i(rv).(map[float32]float64), false, d) - } -} -func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, d *Decoder) { - v, changed := f.DecMapFloat32Float64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, canChange bool, - d *Decoder) (_ map[float32]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float32 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float32]bool) - v, changed := fastpathTV.DecMapFloat32BoolV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat32BoolV(rv2i(rv).(map[float32]bool), false, d) - } -} -func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, d *Decoder) { - v, changed := 
f.DecMapFloat32BoolV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, canChange bool, - d *Decoder) (_ map[float32]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[float32]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float32 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]interface{}) - v, changed := fastpathTV.DecMapFloat64IntfV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), false, d) - } -} -func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, d *Decoder) { - v, changed := f.DecMapFloat64IntfV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, canChange bool, - d *Decoder) (_ map[float64]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[float64]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset - var mk float64 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]string) - v, changed := fastpathTV.DecMapFloat64StringV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat64StringV(rv2i(rv).(map[float64]string), false, d) - } -} -func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, d *Decoder) { - v, changed := f.DecMapFloat64StringV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, canChange bool, - d *Decoder) (_ map[float64]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[float64]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() 
- var mk float64 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]uint) - v, changed := fastpathTV.DecMapFloat64UintV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat64UintV(rv2i(rv).(map[float64]uint), false, d) - } -} -func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, d *Decoder) { - v, changed := f.DecMapFloat64UintV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, canChange bool, - d *Decoder) (_ map[float64]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float64 - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]uint8) - v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), false, d) - } -} -func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, d *Decoder) { - v, changed := f.DecMapFloat64Uint8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, canChange bool, - d *Decoder) (_ map[float64]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[float64]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float64 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := 
rv2i(rv).(*map[float64]uint16) - v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), false, d) - } -} -func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, d *Decoder) { - v, changed := f.DecMapFloat64Uint16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, canChange bool, - d *Decoder) (_ map[float64]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[float64]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float64 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]uint32) - v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), false, d) - } -} -func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, d *Decoder) { - v, changed := f.DecMapFloat64Uint32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, canChange bool, - d *Decoder) (_ map[float64]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float64]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float64 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]uint64) - v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), false, d) - } -} -func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, d *Decoder) { - v, changed := f.DecMapFloat64Uint64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, canChange bool, - d *Decoder) (_ map[float64]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if 
canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float64 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]uintptr) - v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), false, d) - } -} -func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, d *Decoder) { - v, changed := f.DecMapFloat64UintptrV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, canChange bool, - d *Decoder) (_ map[float64]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float64 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]int) - v, changed := fastpathTV.DecMapFloat64IntV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat64IntV(rv2i(rv).(map[float64]int), false, d) - } -} -func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, d *Decoder) { - v, changed := f.DecMapFloat64IntV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, canChange bool, - d *Decoder) (_ map[float64]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float64 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = 
int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]int8) - v, changed := fastpathTV.DecMapFloat64Int8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat64Int8V(rv2i(rv).(map[float64]int8), false, d) - } -} -func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, d *Decoder) { - v, changed := f.DecMapFloat64Int8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, canChange bool, - d *Decoder) (_ map[float64]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[float64]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float64 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]int16) - v, changed := fastpathTV.DecMapFloat64Int16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat64Int16V(rv2i(rv).(map[float64]int16), false, d) - } -} -func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, d *Decoder) { - v, changed := f.DecMapFloat64Int16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, canChange bool, - d *Decoder) (_ map[float64]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[float64]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float64 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]int32) - v, changed := fastpathTV.DecMapFloat64Int32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat64Int32V(rv2i(rv).(map[float64]int32), false, d) - } -} -func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, d *Decoder) { - v, changed := f.DecMapFloat64Int32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ 
fastpathT) DecMapFloat64Int32V(v map[float64]int32, canChange bool, - d *Decoder) (_ map[float64]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float64]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float64 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]int64) - v, changed := fastpathTV.DecMapFloat64Int64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat64Int64V(rv2i(rv).(map[float64]int64), false, d) - } -} -func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, d *Decoder) { - v, changed := f.DecMapFloat64Int64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, canChange bool, - d *Decoder) (_ map[float64]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float64 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]float32) - v, changed := fastpathTV.DecMapFloat64Float32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat64Float32V(rv2i(rv).(map[float64]float32), false, d) - } -} -func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, d *Decoder) { - v, changed := f.DecMapFloat64Float32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, canChange bool, - d *Decoder) (_ map[float64]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float64]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float64 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat64() - 
if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]float64) - v, changed := fastpathTV.DecMapFloat64Float64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat64Float64V(rv2i(rv).(map[float64]float64), false, d) - } -} -func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, d *Decoder) { - v, changed := f.DecMapFloat64Float64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, canChange bool, - d *Decoder) (_ map[float64]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float64 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[float64]bool) - v, changed := fastpathTV.DecMapFloat64BoolV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapFloat64BoolV(rv2i(rv).(map[float64]bool), false, d) - } -} -func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, d *Decoder) { - v, changed := f.DecMapFloat64BoolV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, canChange bool, - d *Decoder) (_ map[float64]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[float64]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk float64 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeFloat64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintIntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]interface{}) - v, changed := fastpathTV.DecMapUintIntfV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintIntfV(rv2i(rv).(map[uint]interface{}), false, d) - } -} -func (f 
fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, d *Decoder) { - v, changed := f.DecMapUintIntfV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, canChange bool, - d *Decoder) (_ map[uint]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintStringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]string) - v, changed := fastpathTV.DecMapUintStringV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintStringV(rv2i(rv).(map[uint]string), false, d) - } -} -func (f fastpathT) DecMapUintStringX(vp *map[uint]string, d *Decoder) { - v, changed := f.DecMapUintStringV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintStringV(v map[uint]string, canChange bool, - d *Decoder) (_ map[uint]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintUintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]uint) - v, changed := fastpathTV.DecMapUintUintV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintUintV(rv2i(rv).(map[uint]uint), false, d) - } -} -func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, d *Decoder) { - v, changed := f.DecMapUintUintV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUintV(v map[uint]uint, canChange bool, - d *Decoder) (_ map[uint]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - 
d.depthIncr() - var mk uint - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintUint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]uint8) - v, changed := fastpathTV.DecMapUintUint8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintUint8V(rv2i(rv).(map[uint]uint8), false, d) - } -} -func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, d *Decoder) { - v, changed := f.DecMapUintUint8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, canChange bool, - d *Decoder) (_ map[uint]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintUint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]uint16) - v, changed := fastpathTV.DecMapUintUint16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintUint16V(rv2i(rv).(map[uint]uint16), false, d) - } -} -func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, d *Decoder) { - v, changed := f.DecMapUintUint16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, canChange bool, - d *Decoder) (_ map[uint]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintUint32R(f 
*codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]uint32) - v, changed := fastpathTV.DecMapUintUint32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintUint32V(rv2i(rv).(map[uint]uint32), false, d) - } -} -func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, d *Decoder) { - v, changed := f.DecMapUintUint32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, canChange bool, - d *Decoder) (_ map[uint]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintUint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]uint64) - v, changed := fastpathTV.DecMapUintUint64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintUint64V(rv2i(rv).(map[uint]uint64), false, d) - } -} -func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, d *Decoder) { - v, changed := f.DecMapUintUint64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, canChange bool, - d *Decoder) (_ map[uint]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintUintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]uintptr) - v, changed := fastpathTV.DecMapUintUintptrV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintUintptrV(rv2i(rv).(map[uint]uintptr), false, d) - } -} -func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, d *Decoder) { - v, changed := f.DecMapUintUintptrV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, canChange bool, - d *Decoder) (_ map[uint]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := 
dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintIntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]int) - v, changed := fastpathTV.DecMapUintIntV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintIntV(rv2i(rv).(map[uint]int), false, d) - } -} -func (f fastpathT) DecMapUintIntX(vp *map[uint]int, d *Decoder) { - v, changed := f.DecMapUintIntV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintIntV(v map[uint]int, canChange bool, - d *Decoder) (_ map[uint]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintInt8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]int8) - v, changed := fastpathTV.DecMapUintInt8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintInt8V(rv2i(rv).(map[uint]int8), false, d) - } -} -func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, d *Decoder) { - v, changed := f.DecMapUintInt8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, canChange bool, - d *Decoder) (_ map[uint]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = 
int8(chkOvf.IntV(dd.DecodeInt64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintInt16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]int16) - v, changed := fastpathTV.DecMapUintInt16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintInt16V(rv2i(rv).(map[uint]int16), false, d) - } -} -func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, d *Decoder) { - v, changed := f.DecMapUintInt16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, canChange bool, - d *Decoder) (_ map[uint]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintInt32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]int32) - v, changed := fastpathTV.DecMapUintInt32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintInt32V(rv2i(rv).(map[uint]int32), false, d) - } -} -func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, d *Decoder) { - v, changed := f.DecMapUintInt32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, canChange bool, - d *Decoder) (_ map[uint]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintInt64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]int64) - v, changed := fastpathTV.DecMapUintInt64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintInt64V(rv2i(rv).(map[uint]int64), false, d) - } -} -func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, d *Decoder) { - v, changed := f.DecMapUintInt64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt64V(v 
map[uint]int64, canChange bool, - d *Decoder) (_ map[uint]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintFloat32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]float32) - v, changed := fastpathTV.DecMapUintFloat32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintFloat32V(rv2i(rv).(map[uint]float32), false, d) - } -} -func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, d *Decoder) { - v, changed := f.DecMapUintFloat32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, canChange bool, - d *Decoder) (_ map[uint]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintFloat64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]float64) - v, changed := fastpathTV.DecMapUintFloat64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintFloat64V(rv2i(rv).(map[uint]float64), false, d) - } -} -func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, d *Decoder) { - v, changed := f.DecMapUintFloat64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, canChange bool, - d *Decoder) (_ map[uint]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) 
- if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintBoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint]bool) - v, changed := fastpathTV.DecMapUintBoolV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintBoolV(rv2i(rv).(map[uint]bool), false, d) - } -} -func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, d *Decoder) { - v, changed := f.DecMapUintBoolV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, canChange bool, - d *Decoder) (_ map[uint]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]interface{}) - v, changed := fastpathTV.DecMapUint8IntfV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), false, d) - } -} -func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, d *Decoder) { - v, changed := f.DecMapUint8IntfV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, canChange bool, - d *Decoder) (_ map[uint8]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[uint8]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint8 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]string) - v, changed := fastpathTV.DecMapUint8StringV(*vp, true, d) - if changed { - *vp = v - } - } else { - 
fastpathTV.DecMapUint8StringV(rv2i(rv).(map[uint8]string), false, d) - } -} -func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, d *Decoder) { - v, changed := f.DecMapUint8StringV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, canChange bool, - d *Decoder) (_ map[uint8]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[uint8]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint8 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]uint) - v, changed := fastpathTV.DecMapUint8UintV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint8UintV(rv2i(rv).(map[uint8]uint), false, d) - } -} -func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, d *Decoder) { - v, changed := f.DecMapUint8UintV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, canChange bool, - d *Decoder) (_ map[uint8]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint8 - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]uint8) - v, changed := fastpathTV.DecMapUint8Uint8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), false, d) - } -} -func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, d *Decoder) { - v, changed := f.DecMapUint8Uint8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, canChange bool, - d *Decoder) (_ map[uint8]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[uint8]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - 
var mk uint8 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]uint16) - v, changed := fastpathTV.DecMapUint8Uint16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), false, d) - } -} -func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, d *Decoder) { - v, changed := f.DecMapUint8Uint16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, canChange bool, - d *Decoder) (_ map[uint8]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint8]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint8 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]uint32) - v, changed := fastpathTV.DecMapUint8Uint32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), false, d) - } -} -func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, d *Decoder) { - v, changed := f.DecMapUint8Uint32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, canChange bool, - d *Decoder) (_ map[uint8]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint8]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint8 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Uint64R(f *codecFnInfo, 
rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]uint64) - v, changed := fastpathTV.DecMapUint8Uint64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), false, d) - } -} -func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, d *Decoder) { - v, changed := f.DecMapUint8Uint64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, canChange bool, - d *Decoder) (_ map[uint8]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint8 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]uintptr) - v, changed := fastpathTV.DecMapUint8UintptrV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), false, d) - } -} -func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, d *Decoder) { - v, changed := f.DecMapUint8UintptrV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, canChange bool, - d *Decoder) (_ map[uint8]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint8 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]int) - v, changed := fastpathTV.DecMapUint8IntV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint8IntV(rv2i(rv).(map[uint8]int), false, d) - } -} -func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, d *Decoder) { - v, changed := f.DecMapUint8IntV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, canChange bool, - d *Decoder) (_ map[uint8]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && 
v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint8 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]int8) - v, changed := fastpathTV.DecMapUint8Int8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint8Int8V(rv2i(rv).(map[uint8]int8), false, d) - } -} -func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, d *Decoder) { - v, changed := f.DecMapUint8Int8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, canChange bool, - d *Decoder) (_ map[uint8]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[uint8]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint8 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]int16) - v, changed := fastpathTV.DecMapUint8Int16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint8Int16V(rv2i(rv).(map[uint8]int16), false, d) - } -} -func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, d *Decoder) { - v, changed := f.DecMapUint8Int16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, canChange bool, - d *Decoder) (_ map[uint8]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint8]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint8 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(chkOvf.IntV(dd.DecodeInt64(), 
16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]int32) - v, changed := fastpathTV.DecMapUint8Int32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint8Int32V(rv2i(rv).(map[uint8]int32), false, d) - } -} -func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, d *Decoder) { - v, changed := f.DecMapUint8Int32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, canChange bool, - d *Decoder) (_ map[uint8]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint8]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint8 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]int64) - v, changed := fastpathTV.DecMapUint8Int64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint8Int64V(rv2i(rv).(map[uint8]int64), false, d) - } -} -func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, d *Decoder) { - v, changed := f.DecMapUint8Int64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, canChange bool, - d *Decoder) (_ map[uint8]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint8 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]float32) - v, changed := fastpathTV.DecMapUint8Float32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint8Float32V(rv2i(rv).(map[uint8]float32), false, d) - } -} -func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, d *Decoder) { - v, changed := f.DecMapUint8Float32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, canChange 
bool, - d *Decoder) (_ map[uint8]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint8]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint8 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]float64) - v, changed := fastpathTV.DecMapUint8Float64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint8Float64V(rv2i(rv).(map[uint8]float64), false, d) - } -} -func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, d *Decoder) { - v, changed := f.DecMapUint8Float64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, canChange bool, - d *Decoder) (_ map[uint8]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint8 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint8BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint8]bool) - v, changed := fastpathTV.DecMapUint8BoolV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint8BoolV(rv2i(rv).(map[uint8]bool), false, d) - } -} -func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, d *Decoder) { - v, changed := f.DecMapUint8BoolV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, canChange bool, - d *Decoder) (_ map[uint8]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[uint8]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint8 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if esep { - dd.ReadMapElemValue() - } - if 
dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]interface{}) - v, changed := fastpathTV.DecMapUint16IntfV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), false, d) - } -} -func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, d *Decoder) { - v, changed := f.DecMapUint16IntfV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, canChange bool, - d *Decoder) (_ map[uint16]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[uint16]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint16 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]string) - v, changed := fastpathTV.DecMapUint16StringV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint16StringV(rv2i(rv).(map[uint16]string), false, d) - } -} -func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, d *Decoder) { - v, changed := f.DecMapUint16StringV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, canChange bool, - d *Decoder) (_ map[uint16]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[uint16]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint16 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]uint) - v, changed := fastpathTV.DecMapUint16UintV(*vp, true, d) - if changed { - *vp = v - } - 
} else { - fastpathTV.DecMapUint16UintV(rv2i(rv).(map[uint16]uint), false, d) - } -} -func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, d *Decoder) { - v, changed := f.DecMapUint16UintV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, canChange bool, - d *Decoder) (_ map[uint16]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint16 - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]uint8) - v, changed := fastpathTV.DecMapUint16Uint8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), false, d) - } -} -func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, d *Decoder) { - v, changed := f.DecMapUint16Uint8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, canChange bool, - d *Decoder) (_ map[uint16]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint16]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint16 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]uint16) - v, changed := fastpathTV.DecMapUint16Uint16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), false, d) - } -} -func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, d *Decoder) { - v, changed := f.DecMapUint16Uint16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, canChange bool, - d *Decoder) (_ map[uint16]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[uint16]uint16, xlen) - changed = true - } - if 
containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint16 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]uint32) - v, changed := fastpathTV.DecMapUint16Uint32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), false, d) - } -} -func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, d *Decoder) { - v, changed := f.DecMapUint16Uint32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, canChange bool, - d *Decoder) (_ map[uint16]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint16]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint16 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]uint64) - v, changed := fastpathTV.DecMapUint16Uint64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), false, d) - } -} -func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, d *Decoder) { - v, changed := f.DecMapUint16Uint64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, canChange bool, - d *Decoder) (_ map[uint16]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint16 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - 
d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]uintptr) - v, changed := fastpathTV.DecMapUint16UintptrV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), false, d) - } -} -func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, d *Decoder) { - v, changed := f.DecMapUint16UintptrV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, canChange bool, - d *Decoder) (_ map[uint16]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint16 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]int) - v, changed := fastpathTV.DecMapUint16IntV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint16IntV(rv2i(rv).(map[uint16]int), false, d) - } -} -func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, d *Decoder) { - v, changed := f.DecMapUint16IntV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, canChange bool, - d *Decoder) (_ map[uint16]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint16 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]int8) - v, changed := fastpathTV.DecMapUint16Int8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint16Int8V(rv2i(rv).(map[uint16]int8), false, d) - } -} -func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, d *Decoder) { - v, changed := f.DecMapUint16Int8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, canChange bool, - 
d *Decoder) (_ map[uint16]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint16]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint16 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]int16) - v, changed := fastpathTV.DecMapUint16Int16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint16Int16V(rv2i(rv).(map[uint16]int16), false, d) - } -} -func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, d *Decoder) { - v, changed := f.DecMapUint16Int16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, canChange bool, - d *Decoder) (_ map[uint16]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[uint16]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint16 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]int32) - v, changed := fastpathTV.DecMapUint16Int32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint16Int32V(rv2i(rv).(map[uint16]int32), false, d) - } -} -func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, d *Decoder) { - v, changed := f.DecMapUint16Int32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, canChange bool, - d *Decoder) (_ map[uint16]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint16]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint16 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if esep { - dd.ReadMapElemValue() 
- } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]int64) - v, changed := fastpathTV.DecMapUint16Int64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint16Int64V(rv2i(rv).(map[uint16]int64), false, d) - } -} -func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, d *Decoder) { - v, changed := f.DecMapUint16Int64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, canChange bool, - d *Decoder) (_ map[uint16]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint16 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]float32) - v, changed := fastpathTV.DecMapUint16Float32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint16Float32V(rv2i(rv).(map[uint16]float32), false, d) - } -} -func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, d *Decoder) { - v, changed := f.DecMapUint16Float32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, canChange bool, - d *Decoder) (_ map[uint16]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint16]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint16 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]float64) - v, changed := fastpathTV.DecMapUint16Float64V(*vp, true, d) - if changed { - *vp = v - } - } else { - 
fastpathTV.DecMapUint16Float64V(rv2i(rv).(map[uint16]float64), false, d) - } -} -func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, d *Decoder) { - v, changed := f.DecMapUint16Float64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, canChange bool, - d *Decoder) (_ map[uint16]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint16 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint16BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint16]bool) - v, changed := fastpathTV.DecMapUint16BoolV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint16BoolV(rv2i(rv).(map[uint16]bool), false, d) - } -} -func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, d *Decoder) { - v, changed := f.DecMapUint16BoolV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, canChange bool, - d *Decoder) (_ map[uint16]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint16]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint16 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]interface{}) - v, changed := fastpathTV.DecMapUint32IntfV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), false, d) - } -} -func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, d *Decoder) { - v, changed := f.DecMapUint32IntfV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, canChange bool, - d *Decoder) (_ map[uint32]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[uint32]interface{}, xlen) - changed = true - } - if containerLen == 0 { - 
dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint32 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]string) - v, changed := fastpathTV.DecMapUint32StringV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint32StringV(rv2i(rv).(map[uint32]string), false, d) - } -} -func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, d *Decoder) { - v, changed := f.DecMapUint32StringV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, canChange bool, - d *Decoder) (_ map[uint32]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[uint32]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint32 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]uint) - v, changed := fastpathTV.DecMapUint32UintV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint32UintV(rv2i(rv).(map[uint32]uint), false, d) - } -} -func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, d *Decoder) { - v, changed := f.DecMapUint32UintV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, canChange bool, - d *Decoder) (_ map[uint32]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint32 - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { 
- v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]uint8) - v, changed := fastpathTV.DecMapUint32Uint8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), false, d) - } -} -func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, d *Decoder) { - v, changed := f.DecMapUint32Uint8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, canChange bool, - d *Decoder) (_ map[uint32]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint32]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint32 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]uint16) - v, changed := fastpathTV.DecMapUint32Uint16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), false, d) - } -} -func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, d *Decoder) { - v, changed := f.DecMapUint32Uint16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, canChange bool, - d *Decoder) (_ map[uint32]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint32]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint32 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]uint32) - v, changed := fastpathTV.DecMapUint32Uint32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), false, d) - } -} -func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, d *Decoder) { - v, changed := f.DecMapUint32Uint32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) 
DecMapUint32Uint32V(v map[uint32]uint32, canChange bool, - d *Decoder) (_ map[uint32]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[uint32]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint32 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]uint64) - v, changed := fastpathTV.DecMapUint32Uint64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), false, d) - } -} -func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, d *Decoder) { - v, changed := f.DecMapUint32Uint64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, canChange bool, - d *Decoder) (_ map[uint32]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint32 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]uintptr) - v, changed := fastpathTV.DecMapUint32UintptrV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), false, d) - } -} -func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, d *Decoder) { - v, changed := f.DecMapUint32UintptrV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, canChange bool, - d *Decoder) (_ map[uint32]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint32 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } 
- mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]int) - v, changed := fastpathTV.DecMapUint32IntV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint32IntV(rv2i(rv).(map[uint32]int), false, d) - } -} -func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, d *Decoder) { - v, changed := f.DecMapUint32IntV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, canChange bool, - d *Decoder) (_ map[uint32]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint32 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]int8) - v, changed := fastpathTV.DecMapUint32Int8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint32Int8V(rv2i(rv).(map[uint32]int8), false, d) - } -} -func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, d *Decoder) { - v, changed := f.DecMapUint32Int8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, canChange bool, - d *Decoder) (_ map[uint32]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint32]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint32 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]int16) - v, changed := fastpathTV.DecMapUint32Int16V(*vp, true, d) - if changed { - *vp = v - } - } else { - 
fastpathTV.DecMapUint32Int16V(rv2i(rv).(map[uint32]int16), false, d) - } -} -func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, d *Decoder) { - v, changed := f.DecMapUint32Int16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, canChange bool, - d *Decoder) (_ map[uint32]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint32]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint32 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]int32) - v, changed := fastpathTV.DecMapUint32Int32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint32Int32V(rv2i(rv).(map[uint32]int32), false, d) - } -} -func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, d *Decoder) { - v, changed := f.DecMapUint32Int32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, canChange bool, - d *Decoder) (_ map[uint32]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[uint32]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint32 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]int64) - v, changed := fastpathTV.DecMapUint32Int64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint32Int64V(rv2i(rv).(map[uint32]int64), false, d) - } -} -func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, d *Decoder) { - v, changed := f.DecMapUint32Int64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, canChange bool, - d *Decoder) (_ map[uint32]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]int64, xlen) - changed = true - } - if containerLen == 0 { - 
dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint32 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]float32) - v, changed := fastpathTV.DecMapUint32Float32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint32Float32V(rv2i(rv).(map[uint32]float32), false, d) - } -} -func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, d *Decoder) { - v, changed := f.DecMapUint32Float32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, canChange bool, - d *Decoder) (_ map[uint32]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[uint32]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint32 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint32Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]float64) - v, changed := fastpathTV.DecMapUint32Float64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint32Float64V(rv2i(rv).(map[uint32]float64), false, d) - } -} -func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, d *Decoder) { - v, changed := f.DecMapUint32Float64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, canChange bool, - d *Decoder) (_ map[uint32]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint32 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, 
changed -} - -func (d *Decoder) fastpathDecMapUint32BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint32]bool) - v, changed := fastpathTV.DecMapUint32BoolV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint32BoolV(rv2i(rv).(map[uint32]bool), false, d) - } -} -func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, d *Decoder) { - v, changed := f.DecMapUint32BoolV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, canChange bool, - d *Decoder) (_ map[uint32]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint32]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint32 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]interface{}) - v, changed := fastpathTV.DecMapUint64IntfV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), false, d) - } -} -func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, d *Decoder) { - v, changed := f.DecMapUint64IntfV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, canChange bool, - d *Decoder) (_ map[uint64]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint64]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint64 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]string) - v, changed := fastpathTV.DecMapUint64StringV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint64StringV(rv2i(rv).(map[uint64]string), false, d) - } -} -func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, d *Decoder) { - v, changed := f.DecMapUint64StringV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) 
DecMapUint64StringV(v map[uint64]string, canChange bool, - d *Decoder) (_ map[uint64]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint64]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint64 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]uint) - v, changed := fastpathTV.DecMapUint64UintV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint64UintV(rv2i(rv).(map[uint64]uint), false, d) - } -} -func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, d *Decoder) { - v, changed := f.DecMapUint64UintV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, canChange bool, - d *Decoder) (_ map[uint64]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint64 - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]uint8) - v, changed := fastpathTV.DecMapUint64Uint8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), false, d) - } -} -func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, d *Decoder) { - v, changed := f.DecMapUint64Uint8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, canChange bool, - d *Decoder) (_ map[uint64]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint64]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint64 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - 
if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]uint16) - v, changed := fastpathTV.DecMapUint64Uint16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), false, d) - } -} -func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, d *Decoder) { - v, changed := f.DecMapUint64Uint16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, canChange bool, - d *Decoder) (_ map[uint64]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint64]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint64 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]uint32) - v, changed := fastpathTV.DecMapUint64Uint32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), false, d) - } -} -func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, d *Decoder) { - v, changed := f.DecMapUint64Uint32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, canChange bool, - d *Decoder) (_ map[uint64]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint64]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint64 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]uint64) - v, changed := fastpathTV.DecMapUint64Uint64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), false, d) - } -} -func (f fastpathT) DecMapUint64Uint64X(vp 
*map[uint64]uint64, d *Decoder) { - v, changed := f.DecMapUint64Uint64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, canChange bool, - d *Decoder) (_ map[uint64]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint64 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]uintptr) - v, changed := fastpathTV.DecMapUint64UintptrV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), false, d) - } -} -func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, d *Decoder) { - v, changed := f.DecMapUint64UintptrV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, canChange bool, - d *Decoder) (_ map[uint64]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint64 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]int) - v, changed := fastpathTV.DecMapUint64IntV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint64IntV(rv2i(rv).(map[uint64]int), false, d) - } -} -func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, d *Decoder) { - v, changed := f.DecMapUint64IntV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, canChange bool, - d *Decoder) (_ map[uint64]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint64 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || 
dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]int8) - v, changed := fastpathTV.DecMapUint64Int8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint64Int8V(rv2i(rv).(map[uint64]int8), false, d) - } -} -func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, d *Decoder) { - v, changed := f.DecMapUint64Int8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, canChange bool, - d *Decoder) (_ map[uint64]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint64]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint64 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]int16) - v, changed := fastpathTV.DecMapUint64Int16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint64Int16V(rv2i(rv).(map[uint64]int16), false, d) - } -} -func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, d *Decoder) { - v, changed := f.DecMapUint64Int16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, canChange bool, - d *Decoder) (_ map[uint64]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint64]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint64 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]int32) - v, changed := fastpathTV.DecMapUint64Int32V(*vp, true, d) - if changed { - *vp = v - } - } else { - 
fastpathTV.DecMapUint64Int32V(rv2i(rv).(map[uint64]int32), false, d) - } -} -func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, d *Decoder) { - v, changed := f.DecMapUint64Int32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, canChange bool, - d *Decoder) (_ map[uint64]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint64]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint64 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]int64) - v, changed := fastpathTV.DecMapUint64Int64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint64Int64V(rv2i(rv).(map[uint64]int64), false, d) - } -} -func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, d *Decoder) { - v, changed := f.DecMapUint64Int64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, canChange bool, - d *Decoder) (_ map[uint64]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint64 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]float32) - v, changed := fastpathTV.DecMapUint64Float32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint64Float32V(rv2i(rv).(map[uint64]float32), false, d) - } -} -func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, d *Decoder) { - v, changed := f.DecMapUint64Float32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, canChange bool, - d *Decoder) (_ map[uint64]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint64]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() 
- var mk uint64 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]float64) - v, changed := fastpathTV.DecMapUint64Float64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint64Float64V(rv2i(rv).(map[uint64]float64), false, d) - } -} -func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, d *Decoder) { - v, changed := f.DecMapUint64Float64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, canChange bool, - d *Decoder) (_ map[uint64]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint64 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUint64BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uint64]bool) - v, changed := fastpathTV.DecMapUint64BoolV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUint64BoolV(rv2i(rv).(map[uint64]bool), false, d) - } -} -func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, d *Decoder) { - v, changed := f.DecMapUint64BoolV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, canChange bool, - d *Decoder) (_ map[uint64]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint64]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uint64 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeUint64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]interface{}) - 
v, changed := fastpathTV.DecMapUintptrIntfV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), false, d) - } -} -func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, d *Decoder) { - v, changed := f.DecMapUintptrIntfV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, canChange bool, - d *Decoder) (_ map[uintptr]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uintptr]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset - var mk uintptr - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrStringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]string) - v, changed := fastpathTV.DecMapUintptrStringV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintptrStringV(rv2i(rv).(map[uintptr]string), false, d) - } -} -func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, d *Decoder) { - v, changed := f.DecMapUintptrStringV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, canChange bool, - d *Decoder) (_ map[uintptr]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uintptr]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uintptr - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrUintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]uint) - v, changed := fastpathTV.DecMapUintptrUintV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintptrUintV(rv2i(rv).(map[uintptr]uint), false, d) - } -} -func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, d *Decoder) { - v, changed := f.DecMapUintptrUintV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, canChange bool, - d *Decoder) (_ map[uintptr]uint, changed bool) 
{ - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uintptr - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]uint8) - v, changed := fastpathTV.DecMapUintptrUint8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), false, d) - } -} -func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, d *Decoder) { - v, changed := f.DecMapUintptrUint8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, canChange bool, - d *Decoder) (_ map[uintptr]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uintptr]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uintptr - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[uintptr]uint16) - v, changed := fastpathTV.DecMapUintptrUint16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), false, d) - } -} -func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, d *Decoder) { - v, changed := f.DecMapUintptrUint16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, canChange bool, - d *Decoder) (_ map[uintptr]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uintptr]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk uintptr - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), 
[Deleted hunk continues: the remaining generated fast-path map decoders (fastpathDecMap...R, DecMap...X, and DecMap...V helpers) for maps keyed by uintptr, int, int8, and int16, with interface{}, string, unsigned and signed integer, uintptr, float32, float64, and bool value types. Every variant repeats the same loop: ReadMapStart, optional element separators, overflow-checked key and value decoding via chkOvf, DeleteOnNilMapValue handling for nil map values, then ReadMapEnd with depth tracking.]
*Decoder) (_ map[int16]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int16]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int16 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]uint64) - v, changed := fastpathTV.DecMapInt16Uint64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt16Uint64V(rv2i(rv).(map[int16]uint64), false, d) - } -} -func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, d *Decoder) { - v, changed := f.DecMapInt16Uint64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, canChange bool, - d *Decoder) (_ map[int16]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int16 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]uintptr) - v, changed := fastpathTV.DecMapInt16UintptrV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), false, d) - } -} -func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, d *Decoder) { - v, changed := f.DecMapInt16UintptrV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, canChange bool, - d *Decoder) (_ map[int16]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int16 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if 
dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]int) - v, changed := fastpathTV.DecMapInt16IntV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt16IntV(rv2i(rv).(map[int16]int), false, d) - } -} -func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, d *Decoder) { - v, changed := f.DecMapInt16IntV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16IntV(v map[int16]int, canChange bool, - d *Decoder) (_ map[int16]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int16 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]int8) - v, changed := fastpathTV.DecMapInt16Int8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt16Int8V(rv2i(rv).(map[int16]int8), false, d) - } -} -func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, d *Decoder) { - v, changed := f.DecMapInt16Int8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, canChange bool, - d *Decoder) (_ map[int16]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int16]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int16 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]int16) - v, changed := fastpathTV.DecMapInt16Int16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt16Int16V(rv2i(rv).(map[int16]int16), false, d) - } -} -func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, d 
*Decoder) { - v, changed := f.DecMapInt16Int16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, canChange bool, - d *Decoder) (_ map[int16]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[int16]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int16 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]int32) - v, changed := fastpathTV.DecMapInt16Int32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt16Int32V(rv2i(rv).(map[int16]int32), false, d) - } -} -func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, d *Decoder) { - v, changed := f.DecMapInt16Int32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, canChange bool, - d *Decoder) (_ map[int16]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int16]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int16 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]int64) - v, changed := fastpathTV.DecMapInt16Int64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt16Int64V(rv2i(rv).(map[int16]int64), false, d) - } -} -func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, d *Decoder) { - v, changed := f.DecMapInt16Int64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, canChange bool, - d *Decoder) (_ map[int16]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int16 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || 
dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]float32) - v, changed := fastpathTV.DecMapInt16Float32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt16Float32V(rv2i(rv).(map[int16]float32), false, d) - } -} -func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, d *Decoder) { - v, changed := f.DecMapInt16Float32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, canChange bool, - d *Decoder) (_ map[int16]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int16]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int16 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]float64) - v, changed := fastpathTV.DecMapInt16Float64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt16Float64V(rv2i(rv).(map[int16]float64), false, d) - } -} -func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, d *Decoder) { - v, changed := f.DecMapInt16Float64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, canChange bool, - d *Decoder) (_ map[int16]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int16 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt16BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int16]bool) - v, changed := fastpathTV.DecMapInt16BoolV(*vp, true, 
d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt16BoolV(rv2i(rv).(map[int16]bool), false, d) - } -} -func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, d *Decoder) { - v, changed := f.DecMapInt16BoolV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, canChange bool, - d *Decoder) (_ map[int16]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int16]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int16 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]interface{}) - v, changed := fastpathTV.DecMapInt32IntfV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt32IntfV(rv2i(rv).(map[int32]interface{}), false, d) - } -} -func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, d *Decoder) { - v, changed := f.DecMapInt32IntfV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, canChange bool, - d *Decoder) (_ map[int32]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[int32]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset - var mk int32 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]string) - v, changed := fastpathTV.DecMapInt32StringV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt32StringV(rv2i(rv).(map[int32]string), false, d) - } -} -func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, d *Decoder) { - v, changed := f.DecMapInt32StringV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32StringV(v map[int32]string, canChange bool, - d *Decoder) (_ map[int32]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := 
decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[int32]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int32 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]uint) - v, changed := fastpathTV.DecMapInt32UintV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt32UintV(rv2i(rv).(map[int32]uint), false, d) - } -} -func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, d *Decoder) { - v, changed := f.DecMapInt32UintV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, canChange bool, - d *Decoder) (_ map[int32]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int32 - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]uint8) - v, changed := fastpathTV.DecMapInt32Uint8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt32Uint8V(rv2i(rv).(map[int32]uint8), false, d) - } -} -func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, d *Decoder) { - v, changed := f.DecMapInt32Uint8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, canChange bool, - d *Decoder) (_ map[int32]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int32]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int32 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if v != nil { - v[mk] = mv 
- } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]uint16) - v, changed := fastpathTV.DecMapInt32Uint16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt32Uint16V(rv2i(rv).(map[int32]uint16), false, d) - } -} -func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, d *Decoder) { - v, changed := f.DecMapInt32Uint16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, canChange bool, - d *Decoder) (_ map[int32]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int32]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int32 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]uint32) - v, changed := fastpathTV.DecMapInt32Uint32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt32Uint32V(rv2i(rv).(map[int32]uint32), false, d) - } -} -func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, d *Decoder) { - v, changed := f.DecMapInt32Uint32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, canChange bool, - d *Decoder) (_ map[int32]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[int32]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int32 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]uint64) - v, changed := fastpathTV.DecMapInt32Uint64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt32Uint64V(rv2i(rv).(map[int32]uint64), false, d) - } -} -func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, d *Decoder) { - v, changed := f.DecMapInt32Uint64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, 
canChange bool, - d *Decoder) (_ map[int32]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int32 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]uintptr) - v, changed := fastpathTV.DecMapInt32UintptrV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), false, d) - } -} -func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, d *Decoder) { - v, changed := f.DecMapInt32UintptrV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, canChange bool, - d *Decoder) (_ map[int32]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int32 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]int) - v, changed := fastpathTV.DecMapInt32IntV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt32IntV(rv2i(rv).(map[int32]int), false, d) - } -} -func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, d *Decoder) { - v, changed := f.DecMapInt32IntV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32IntV(v map[int32]int, canChange bool, - d *Decoder) (_ map[int32]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int32 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if 
dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]int8) - v, changed := fastpathTV.DecMapInt32Int8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt32Int8V(rv2i(rv).(map[int32]int8), false, d) - } -} -func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, d *Decoder) { - v, changed := f.DecMapInt32Int8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, canChange bool, - d *Decoder) (_ map[int32]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int32]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int32 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]int16) - v, changed := fastpathTV.DecMapInt32Int16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt32Int16V(rv2i(rv).(map[int32]int16), false, d) - } -} -func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, d *Decoder) { - v, changed := f.DecMapInt32Int16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, canChange bool, - d *Decoder) (_ map[int32]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int32]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int32 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]int32) - v, changed := fastpathTV.DecMapInt32Int32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt32Int32V(rv2i(rv).(map[int32]int32), false, d) - } -} -func (f fastpathT) DecMapInt32Int32X(vp 
*map[int32]int32, d *Decoder) { - v, changed := f.DecMapInt32Int32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, canChange bool, - d *Decoder) (_ map[int32]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[int32]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int32 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]int64) - v, changed := fastpathTV.DecMapInt32Int64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt32Int64V(rv2i(rv).(map[int32]int64), false, d) - } -} -func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, d *Decoder) { - v, changed := f.DecMapInt32Int64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, canChange bool, - d *Decoder) (_ map[int32]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int32 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]float32) - v, changed := fastpathTV.DecMapInt32Float32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt32Float32V(rv2i(rv).(map[int32]float32), false, d) - } -} -func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, d *Decoder) { - v, changed := f.DecMapInt32Float32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, canChange bool, - d *Decoder) (_ map[int32]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[int32]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int32 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || 
!(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]float64) - v, changed := fastpathTV.DecMapInt32Float64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt32Float64V(rv2i(rv).(map[int32]float64), false, d) - } -} -func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, d *Decoder) { - v, changed := f.DecMapInt32Float64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, canChange bool, - d *Decoder) (_ map[int32]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int32 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt32BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int32]bool) - v, changed := fastpathTV.DecMapInt32BoolV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt32BoolV(rv2i(rv).(map[int32]bool), false, d) - } -} -func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, d *Decoder) { - v, changed := f.DecMapInt32BoolV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, canChange bool, - d *Decoder) (_ map[int32]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int32]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int32 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64IntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]interface{}) - v, changed := fastpathTV.DecMapInt64IntfV(*vp, true, d) - if changed { 
- *vp = v - } - } else { - fastpathTV.DecMapInt64IntfV(rv2i(rv).(map[int64]interface{}), false, d) - } -} -func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, d *Decoder) { - v, changed := f.DecMapInt64IntfV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, canChange bool, - d *Decoder) (_ map[int64]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int64]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset - var mk int64 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64StringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]string) - v, changed := fastpathTV.DecMapInt64StringV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt64StringV(rv2i(rv).(map[int64]string), false, d) - } -} -func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, d *Decoder) { - v, changed := f.DecMapInt64StringV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64StringV(v map[int64]string, canChange bool, - d *Decoder) (_ map[int64]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int64]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int64 - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64UintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]uint) - v, changed := fastpathTV.DecMapInt64UintV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt64UintV(rv2i(rv).(map[int64]uint), false, d) - } -} -func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, d *Decoder) { - v, changed := f.DecMapInt64UintV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, canChange bool, - d *Decoder) (_ map[int64]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]uint, xlen) - changed = 
true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int64 - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]uint8) - v, changed := fastpathTV.DecMapInt64Uint8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt64Uint8V(rv2i(rv).(map[int64]uint8), false, d) - } -} -func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, d *Decoder) { - v, changed := f.DecMapInt64Uint8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, canChange bool, - d *Decoder) (_ map[int64]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int64]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int64 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]uint16) - v, changed := fastpathTV.DecMapInt64Uint16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt64Uint16V(rv2i(rv).(map[int64]uint16), false, d) - } -} -func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, d *Decoder) { - v, changed := f.DecMapInt64Uint16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, canChange bool, - d *Decoder) (_ map[int64]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int64]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int64 - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Uint32R(f 
*codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]uint32) - v, changed := fastpathTV.DecMapInt64Uint32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt64Uint32V(rv2i(rv).(map[int64]uint32), false, d) - } -} -func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, d *Decoder) { - v, changed := f.DecMapInt64Uint32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, canChange bool, - d *Decoder) (_ map[int64]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int64]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int64 - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]uint64) - v, changed := fastpathTV.DecMapInt64Uint64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt64Uint64V(rv2i(rv).(map[int64]uint64), false, d) - } -} -func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, d *Decoder) { - v, changed := f.DecMapInt64Uint64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, canChange bool, - d *Decoder) (_ map[int64]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int64 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]uintptr) - v, changed := fastpathTV.DecMapInt64UintptrV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), false, d) - } -} -func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, d *Decoder) { - v, changed := f.DecMapInt64UintptrV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, canChange bool, - d *Decoder) (_ map[int64]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - 
xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int64 - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64IntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]int) - v, changed := fastpathTV.DecMapInt64IntV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt64IntV(rv2i(rv).(map[int64]int), false, d) - } -} -func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, d *Decoder) { - v, changed := f.DecMapInt64IntV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64IntV(v map[int64]int, canChange bool, - d *Decoder) (_ map[int64]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int64 - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Int8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]int8) - v, changed := fastpathTV.DecMapInt64Int8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt64Int8V(rv2i(rv).(map[int64]int8), false, d) - } -} -func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, d *Decoder) { - v, changed := f.DecMapInt64Int8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, canChange bool, - d *Decoder) (_ map[int64]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int64]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int64 - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, 
changed -} - -func (d *Decoder) fastpathDecMapInt64Int16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]int16) - v, changed := fastpathTV.DecMapInt64Int16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt64Int16V(rv2i(rv).(map[int64]int16), false, d) - } -} -func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, d *Decoder) { - v, changed := f.DecMapInt64Int16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, canChange bool, - d *Decoder) (_ map[int64]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int64]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int64 - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Int32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]int32) - v, changed := fastpathTV.DecMapInt64Int32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt64Int32V(rv2i(rv).(map[int64]int32), false, d) - } -} -func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, d *Decoder) { - v, changed := f.DecMapInt64Int32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, canChange bool, - d *Decoder) (_ map[int64]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int64]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int64 - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Int64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]int64) - v, changed := fastpathTV.DecMapInt64Int64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt64Int64V(rv2i(rv).(map[int64]int64), false, d) - } -} -func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, d *Decoder) { - v, changed := f.DecMapInt64Int64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, canChange bool, - d *Decoder) (_ map[int64]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := 
dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int64 - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Float32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]float32) - v, changed := fastpathTV.DecMapInt64Float32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt64Float32V(rv2i(rv).(map[int64]float32), false, d) - } -} -func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, d *Decoder) { - v, changed := f.DecMapInt64Float32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, canChange bool, - d *Decoder) (_ map[int64]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int64]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int64 - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64Float64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]float64) - v, changed := fastpathTV.DecMapInt64Float64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt64Float64V(rv2i(rv).(map[int64]float64), false, d) - } -} -func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, d *Decoder) { - v, changed := f.DecMapInt64Float64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, canChange bool, - d *Decoder) (_ map[int64]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int64 - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat64() - if v != 
nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapInt64BoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[int64]bool) - v, changed := fastpathTV.DecMapInt64BoolV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapInt64BoolV(rv2i(rv).(map[int64]bool), false, d) - } -} -func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, d *Decoder) { - v, changed := f.DecMapInt64BoolV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, canChange bool, - d *Decoder) (_ map[int64]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int64]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk int64 - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeInt64() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolIntfR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]interface{}) - v, changed := fastpathTV.DecMapBoolIntfV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapBoolIntfV(rv2i(rv).(map[bool]interface{}), false, d) - } -} -func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, d *Decoder) { - v, changed := f.DecMapBoolIntfV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, canChange bool, - d *Decoder) (_ map[bool]interface{}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[bool]interface{}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset - var mk bool - var mv interface{} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = nil - } - continue - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolStringR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]string) - v, changed := fastpathTV.DecMapBoolStringV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapBoolStringV(rv2i(rv).(map[bool]string), false, d) - } -} -func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, d *Decoder) { - v, changed := f.DecMapBoolStringV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolStringV(v 
map[bool]string, canChange bool, - d *Decoder) (_ map[bool]string, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[bool]string, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk bool - var mv string - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = "" - } - continue - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolUintR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]uint) - v, changed := fastpathTV.DecMapBoolUintV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapBoolUintV(rv2i(rv).(map[bool]uint), false, d) - } -} -func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, d *Decoder) { - v, changed := f.DecMapBoolUintV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, canChange bool, - d *Decoder) (_ map[bool]uint, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]uint, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk bool - var mv uint - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolUint8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]uint8) - v, changed := fastpathTV.DecMapBoolUint8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapBoolUint8V(rv2i(rv).(map[bool]uint8), false, d) - } -} -func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, d *Decoder) { - v, changed := f.DecMapBoolUint8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, canChange bool, - d *Decoder) (_ map[bool]uint8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[bool]uint8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk bool - var mv uint8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 
- } - continue - } - mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolUint16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]uint16) - v, changed := fastpathTV.DecMapBoolUint16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapBoolUint16V(rv2i(rv).(map[bool]uint16), false, d) - } -} -func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, d *Decoder) { - v, changed := f.DecMapBoolUint16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, canChange bool, - d *Decoder) (_ map[bool]uint16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[bool]uint16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk bool - var mv uint16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolUint32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]uint32) - v, changed := fastpathTV.DecMapBoolUint32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapBoolUint32V(rv2i(rv).(map[bool]uint32), false, d) - } -} -func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, d *Decoder) { - v, changed := f.DecMapBoolUint32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, canChange bool, - d *Decoder) (_ map[bool]uint32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[bool]uint32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk bool - var mv uint32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolUint64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]uint64) - v, changed := fastpathTV.DecMapBoolUint64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapBoolUint64V(rv2i(rv).(map[bool]uint64), false, d) - } -} -func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, d *Decoder) { - v, changed := f.DecMapBoolUint64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint64V(v 
map[bool]uint64, canChange bool, - d *Decoder) (_ map[bool]uint64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]uint64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk bool - var mv uint64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeUint64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]uintptr) - v, changed := fastpathTV.DecMapBoolUintptrV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), false, d) - } -} -func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, d *Decoder) { - v, changed := f.DecMapBoolUintptrV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, canChange bool, - d *Decoder) (_ map[bool]uintptr, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]uintptr, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk bool - var mv uintptr - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolIntR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]int) - v, changed := fastpathTV.DecMapBoolIntV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapBoolIntV(rv2i(rv).(map[bool]int), false, d) - } -} -func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, d *Decoder) { - v, changed := f.DecMapBoolIntV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolIntV(v map[bool]int, canChange bool, - d *Decoder) (_ map[bool]int, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]int, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk bool - var mv int - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else 
{ - v[mk] = 0 - } - continue - } - mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolInt8R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]int8) - v, changed := fastpathTV.DecMapBoolInt8V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapBoolInt8V(rv2i(rv).(map[bool]int8), false, d) - } -} -func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, d *Decoder) { - v, changed := f.DecMapBoolInt8V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, canChange bool, - d *Decoder) (_ map[bool]int8, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[bool]int8, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk bool - var mv int8 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolInt16R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]int16) - v, changed := fastpathTV.DecMapBoolInt16V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapBoolInt16V(rv2i(rv).(map[bool]int16), false, d) - } -} -func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, d *Decoder) { - v, changed := f.DecMapBoolInt16V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, canChange bool, - d *Decoder) (_ map[bool]int16, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[bool]int16, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk bool - var mv int16 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolInt32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]int32) - v, changed := fastpathTV.DecMapBoolInt32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapBoolInt32V(rv2i(rv).(map[bool]int32), false, d) - } -} -func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, d *Decoder) { - v, changed := f.DecMapBoolInt32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, canChange bool, - d 
*Decoder) (_ map[bool]int32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[bool]int32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk bool - var mv int32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolInt64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]int64) - v, changed := fastpathTV.DecMapBoolInt64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapBoolInt64V(rv2i(rv).(map[bool]int64), false, d) - } -} -func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, d *Decoder) { - v, changed := f.DecMapBoolInt64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, canChange bool, - d *Decoder) (_ map[bool]int64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]int64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk bool - var mv int64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeInt64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]float32) - v, changed := fastpathTV.DecMapBoolFloat32V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapBoolFloat32V(rv2i(rv).(map[bool]float32), false, d) - } -} -func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, d *Decoder) { - v, changed := f.DecMapBoolFloat32V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, canChange bool, - d *Decoder) (_ map[bool]float32, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[bool]float32, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk bool - var mv float32 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - 
continue - } - mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]float64) - v, changed := fastpathTV.DecMapBoolFloat64V(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapBoolFloat64V(rv2i(rv).(map[bool]float64), false, d) - } -} -func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, d *Decoder) { - v, changed := f.DecMapBoolFloat64V(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, canChange bool, - d *Decoder) (_ map[bool]float64, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]float64, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk bool - var mv float64 - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = 0 - } - continue - } - mv = dd.DecodeFloat64() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} - -func (d *Decoder) fastpathDecMapBoolBoolR(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[bool]bool) - v, changed := fastpathTV.DecMapBoolBoolV(*vp, true, d) - if changed { - *vp = v - } - } else { - fastpathTV.DecMapBoolBoolV(rv2i(rv).(map[bool]bool), false, d) - } -} -func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, d *Decoder) { - v, changed := f.DecMapBoolBoolV(*vp, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, canChange bool, - d *Decoder) (_ map[bool]bool, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators() - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[bool]bool, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - var mk bool - var mv bool - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { - dd.ReadMapElemKey() - } - mk = dd.DecodeBool() - if esep { - dd.ReadMapElemValue() - } - if dd.TryDecodeAsNil() { - if v == nil { - } else if d.h.DeleteOnNilMapValue { - delete(v, mk) - } else { - v[mk] = false - } - continue - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.go.tmpl b/src/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.go.tmpl deleted file mode 100644 index 7617c435..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.go.tmpl +++ /dev/null @@ -1,491 +0,0 @@ -// +build !notfastpath - -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// Code generated from fast-path.go.tmpl - DO NOT EDIT. 
- -package codec - -// Fast path functions try to create a fast path encode or decode implementation -// for common maps and slices. -// -// We define the functions and register then in this single file -// so as not to pollute the encode.go and decode.go, and create a dependency in there. -// This file can be omitted without causing a build failure. -// -// The advantage of fast paths is: -// - Many calls bypass reflection altogether -// -// Currently support -// - slice of all builtin types, -// - map of all builtin types to string or interface value -// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) -// This should provide adequate "typical" implementations. -// -// Note that fast track decode functions must handle values for which an address cannot be obtained. -// For example: -// m2 := map[string]int{} -// p2 := []interface{}{m2} -// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. -// - -import ( - "reflect" - "sort" -) - -const fastpathEnabled = true - -const fastpathMapBySliceErrMsg = "mapBySlice requires even slice length, but got %v" - -type fastpathT struct {} - -var fastpathTV fastpathT - -type fastpathE struct { - rtid uintptr - rt reflect.Type - encfn func(*Encoder, *codecFnInfo, reflect.Value) - decfn func(*Decoder, *codecFnInfo, reflect.Value) -} - -type fastpathA [{{ .FastpathLen }}]fastpathE - -func (x *fastpathA) index(rtid uintptr) int { - // use binary search to grab the index (adapted from sort/search.go) - // Note: we use goto (instead of for loop) so this can be inlined. - // h, i, j := 0, 0, len(x) - var h, i uint - var j = uint(len(x)) -LOOP: - if i < j { - h = i + (j-i)/2 - if x[h].rtid < rtid { - i = h + 1 - } else { - j = h - } - goto LOOP - } - if i < uint(len(x)) && x[i].rtid == rtid { - return int(i) - } - return -1 -} - -type fastpathAslice []fastpathE - -func (x fastpathAslice) Len() int { return len(x) } -func (x fastpathAslice) Less(i, j int) bool { return x[uint(i)].rtid < x[uint(j)].rtid } -func (x fastpathAslice) Swap(i, j int) { x[uint(i)], x[uint(j)] = x[uint(j)], x[uint(i)] } - -var fastpathAV fastpathA - -// due to possible initialization loop error, make fastpath in an init() -func init() { - var i uint = 0 - fn := func(v interface{}, - fe func(*Encoder, *codecFnInfo, reflect.Value), - fd func(*Decoder, *codecFnInfo, reflect.Value)) { - xrt := reflect.TypeOf(v) - xptr := rt2id(xrt) - fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} - i++ - } - {{/* do not register []uint8 in fast-path */}} - {{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}} - fn([]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}{{end}} - - {{range .Values}}{{if not .Primitive}}{{if .MapKey }} - fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} - - sort.Sort(fastpathAslice(fastpathAV[:])) -} - -// -- encode - -// -- -- fast path type switch -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { - -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}} - case []{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) - case *[]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e){{/* -*/}}{{end}}{{end}}{{end}}{{end}} - -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} - case map[{{ .MapKey }}]{{ 
.Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) - case *map[{{ .MapKey }}]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e){{/* -*/}}{{end}}{{end}}{{end}} - - default: - _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 - return false - } - return true -} - -{{/* -**** removing this block, as they are never called directly **** - - - -**** removing this block, as they are never called directly **** - - - -func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case []{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) - case *[]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) -{{end}}{{end}}{{end}} - default: - _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 - return false - } - return true -} - -func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} - case map[{{ .MapKey }}]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) - case *map[{{ .MapKey }}]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) -{{end}}{{end}}{{end}} - default: - _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 - return false - } - return true -} - - - -**** removing this block, as they are never called directly **** - - - -**** removing this block, as they are never called directly **** -*/}} - -// -- -- fast path functions -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} -func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) { - if f.ti.mbs { - fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv2i(rv).([]{{ .Elem }}), e) - } else { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).([]{{ .Elem }}), e) - } -} -func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *Encoder) { - if v == nil { e.e.EncodeNil(); return } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteArrayStart(len(v)) - for _, v2 := range v { - if esep { ee.WriteArrayElem() } - {{ encmd .Elem "v2"}} - } - ee.WriteArrayEnd() -} -func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *Encoder) { - ee, esep := e.e, e.hh.hasElemSeparators() - if len(v)%2 == 1 { - e.errorf(fastpathMapBySliceErrMsg, len(v)) - return - } - ee.WriteMapStart(len(v) / 2) - for j, v2 := range v { - if esep { - if j%2 == 0 { - ee.WriteMapElemKey() - } else { - ee.WriteMapElemValue() - } - } - {{ encmd .Elem "v2"}} - } - ee.WriteMapEnd() -} -{{end}}{{end}}{{end}} - -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} -func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e) -} -func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *Encoder) { - if v == nil { e.e.EncodeNil(); return } - ee, esep := e.e, e.hh.hasElemSeparators() - ee.WriteMapStart(len(v)) - if e.h.Canonical { - {{if eq .MapKey "interface{}"}}{{/* out of band - */}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l uint - var vp *bytesI {{/* put loop variables outside. 
seems currently needed for better perf */}} - for k2 := range v { - l = uint(len(mksv)) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if esep { ee.WriteMapElemKey() } - e.asis(v2[j].v) - if esep { ee.WriteMapElemValue() } - e.encode(v[v2[j].i]) - } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v)) - var i uint - for k := range v { - v2[i] = {{ $x }}(k) - i++ - } - sort.Sort({{ sorttype .MapKey false}}(v2)) - for _, k2 := range v2 { - if esep { ee.WriteMapElemKey() } - {{if eq .MapKey "string"}} if e.h.StringToRaw {ee.EncodeStringBytesRaw(bytesView(k2))} else {ee.EncodeStringEnc(cUTF8, k2)} {{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}} - if esep { ee.WriteMapElemValue() } - {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }} - } {{end}} - } else { - for k2, v2 := range v { - if esep { ee.WriteMapElemKey() } - {{if eq .MapKey "string"}} if e.h.StringToRaw {ee.EncodeStringBytesRaw(bytesView(k2))} else {ee.EncodeStringEnc(cUTF8, k2)} {{else}}{{ encmd .MapKey "k2"}}{{end}} - if esep { ee.WriteMapElemValue() } - {{ encmd .Elem "v2"}} - } - } - ee.WriteMapEnd() -} -{{end}}{{end}}{{end}} - -// -- decode - -// -- -- fast path type switch -func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { - var changed bool - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}} - case []{{ .Elem }}: - var v2 []{{ .Elem }} - v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - case *[]{{ .Elem }}: - var v2 []{{ .Elem }} - v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d) - if changed { - *v = v2 - }{{/* -*/}}{{end}}{{end}}{{end}}{{end}} -{{range .Values}}{{if not .Primitive}}{{if .MapKey }}{{/* -// maps only change if nil, and in that case, there's no point copying -*/}} - case map[{{ .MapKey }}]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d) - case *map[{{ .MapKey }}]{{ .Elem }}: - var v2 map[{{ .MapKey }}]{{ .Elem }} - v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d) - if changed { - *v = v2 - }{{/* -*/}}{{end}}{{end}}{{end}} - default: - _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 - return false - } - return true -} - -func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case *[]{{ .Elem }}: - *v = nil {{/* -*/}}{{end}}{{end}}{{end}} -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} - case *map[{{ .MapKey }}]{{ .Elem }}: - *v = nil {{/* -*/}}{{end}}{{end}}{{end}} - default: - _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 - return false - } - return true -} - -// -- -- fast path functions -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} -{{/* -Slices can change if they -- did not come from an array -- are addressable (from a ptr) -- are settable (e.g. 
contained in an interface{}) -*/}} -func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) { - if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*[]{{ .Elem }}) - v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, !array, d) - if changed { *vp = v } - } else { - v := rv2i(rv).([]{{ .Elem }}) - v2, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, !array, d) - if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { - copy(v, v2) - } - } -} -func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *Decoder) { - v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d) - if changed { *vp = v } -} -func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) { - dd := d.d{{/* - // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() - */}} - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { v = []{{ .Elem }}{} } else if len(v) != 0 { v = v[:0] } - changed = true - } - slh.End() - return v, changed - } - d.depthIncr() - hasLen := containerLenS > 0 - var xlen int - if hasLen && canChange { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]{{ .Elem }}, uint(xlen)) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { - if j == 0 && len(v) == 0 && canChange { - if hasLen { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) - } else { - xlen = 8 - } - v = make([]{{ .Elem }}, uint(xlen)) - changed = true - } - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= len(v) { - if canChange { - v = append(v, {{ zerocmd .Elem }}) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - decodeIntoBlank = true - } - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else if dd.TryDecodeAsNil() { - v[uint(j)] = {{ zerocmd .Elem }} - } else { - {{ if eq .Elem "interface{}" }}d.decode(&v[uint(j)]){{ else }}v[uint(j)] = {{ decmd .Elem }}{{ end }} - } - } - if canChange { - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = make([]{{ .Elem }}, 0) - changed = true - } - } - slh.End() - d.depthDecr() - return v, changed -} -{{end}}{{end}}{{end}} - -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} -{{/* -Maps can change if they are -- addressable (from a ptr) -- settable (e.g. 
contained in an interface{}) -*/}} -func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) { - if rv.Kind() == reflect.Ptr { - vp := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }}) - v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); - if changed { *vp = v } - } else { - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), false, d) - } -} -func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *Decoder) { - v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d) - if changed { *vp = v } -} -func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, canChange bool, - d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) { - dd, esep := d.d, d.hh.hasElemSeparators(){{/* - // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() - */}} - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}) - v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen) - changed = true - } - if containerLen == 0 { - dd.ReadMapEnd() - return v, changed - } - d.depthIncr() - {{ if eq .Elem "interface{}" }}mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset - {{end}}var mk {{ .MapKey }} - var mv {{ .Elem }} - hasLen := containerLen > 0 - for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { - if esep { dd.ReadMapElemKey() } - {{ if eq .MapKey "interface{}" }}mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}} - }{{ else }}mk = {{ decmd .MapKey }}{{ end }} - if esep { dd.ReadMapElemValue() } - if dd.TryDecodeAsNil() { - if v == nil {} else if d.h.DeleteOnNilMapValue { delete(v, mk) } else { v[mk] = {{ zerocmd .Elem }} } - continue - } - {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } - d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} - if v != nil { v[mk] = mv } - } - dd.ReadMapEnd() - d.depthDecr() - return v, changed -} -{{end}}{{end}}{{end}} diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.not.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.not.go deleted file mode 100644 index cf97db0f..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.not.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build notfastpath - -package codec - -import "reflect" - -const fastpathEnabled = false - -// The generated fast-path code is very large, and adds a few seconds to the build time. -// This causes test execution, execution of small tools which use codec, etc -// to take a long time. -// -// To mitigate, we now support the notfastpath tag. -// This tag disables fastpath during build, allowing for faster build, test execution, -// short-program runs, etc. 
- -func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } -func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } -func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } -func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false } - -type fastpathT struct{} -type fastpathE struct { - rtid uintptr - rt reflect.Type - encfn func(*Encoder, *codecFnInfo, reflect.Value) - decfn func(*Decoder, *codecFnInfo, reflect.Value) -} -type fastpathA [0]fastpathE - -func (x fastpathA) index(rtid uintptr) int { return -1 } - -func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) { - fn := d.h.fn(uint8SliceTyp, true, true) - d.kSlice(&fn.i, reflect.ValueOf(&v).Elem()) - return v, true -} - -var fastpathAV fastpathA -var fastpathTV fastpathT - -// ---- -type TestMammoth2Wrapper struct{} // to allow testMammoth work in notfastpath mode diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-array.go.tmpl b/src/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-array.go.tmpl deleted file mode 100644 index 790e914e..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-array.go.tmpl +++ /dev/null @@ -1,78 +0,0 @@ -{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} -{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} -var {{var "c"}} bool {{/* // changed */}} -_ = {{var "c"}}{{end}} -if {{var "l"}} == 0 { - {{if isSlice }}if {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - } else if len({{var "v"}}) != 0 { - {{var "v"}} = {{var "v"}}[:0] - {{var "c"}} = true - } {{else if isChan }}if {{var "v"}} == nil { - {{var "v"}} = make({{ .CTyp }}, 0) - {{var "c"}} = true - } {{end}} -} else { - {{var "hl"}} := {{var "l"}} > 0 - var {{var "rl"}} int - _ = {{var "rl"}} - {{if isSlice }} if {{var "hl"}} { - if {{var "l"}} > cap({{var "v"}}) { - {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - if {{var "rl"}} <= cap({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "rl"}}] - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - {{var "c"}} = true - } else if {{var "l"}} != len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "l"}}] - {{var "c"}} = true - } - } {{end}} - var {{var "j"}} int - // var {{var "dn"}} bool - for {{var "j"}} = 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { // bounds-check-elimination - {{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil { - if {{var "hl"}} { - {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - } else { - {{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}} - } - {{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}}) - {{var "c"}} = true - }{{end}} - {{var "h"}}.ElemContainerState({{var "j"}}) - {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}} - {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }} - {{ decLineVar $x }} - {{var "v"}} <- {{ $x }} - // println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this - {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}} - var {{var "db"}} bool - if {{var "j"}} >= len({{var "v"}}) { 
- {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}) - {{var "c"}} = true - {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true - {{end}} - } - if {{var "db"}} { - z.DecSwallow() - } else { - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - {{end}} - } - {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "j"}}] - {{var "c"}} = true - } else if {{var "j"}} == 0 && {{var "v"}} == nil { - {{var "v"}} = make([]{{ .Typ }}, 0) - {{var "c"}} = true - } {{end}} -} -{{var "h"}}.End() -{{if not isArray }}if {{var "c"}} { - *{{ .Varname }} = {{var "v"}} -}{{end}} diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-map.go.tmpl b/src/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-map.go.tmpl deleted file mode 100644 index 8323b549..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-map.go.tmpl +++ /dev/null @@ -1,42 +0,0 @@ -{{var "v"}} := *{{ .Varname }} -{{var "l"}} := r.ReadMapStart() -{{var "bh"}} := z.DecBasicHandle() -if {{var "v"}} == nil { - {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) - {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) - *{{ .Varname }} = {{var "v"}} -} -var {{var "mk"}} {{ .KTyp }} -var {{var "mv"}} {{ .Typ }} -var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool -if {{var "bh"}}.MapValueReset { - {{if decElemKindPtr}}{{var "mg"}} = true - {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } - {{else if not decElemKindImmutable}}{{var "mg"}} = true - {{end}} } -if {{var "l"}} != 0 { -{{var "hl"}} := {{var "l"}} > 0 - for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { - r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true{{end}} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} - {{var "mdn"}} = false - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} - if {{var "mdn"}} { - if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } - } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} // else len==0: TODO: Should we clear map entries? 
-r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}} diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/gen-enc-chan.go.tmpl b/src/vendor/github.com/hashicorp/go-msgpack/codec/gen-enc-chan.go.tmpl deleted file mode 100644 index 4249588a..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/gen-enc-chan.go.tmpl +++ /dev/null @@ -1,27 +0,0 @@ -{{.Label}}: -switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; { -case timeout{{.Sfx}} == 0: // only consume available - for { - select { - case b{{.Sfx}} := <-{{.Chan}}: - {{ .Slice }} = append({{.Slice}}, b{{.Sfx}}) - default: - break {{.Label}} - } - } -case timeout{{.Sfx}} > 0: // consume until timeout - tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}}) - for { - select { - case b{{.Sfx}} := <-{{.Chan}}: - {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) - case <-tt{{.Sfx}}.C: - // close(tt.C) - break {{.Label}} - } - } -default: // consume until close - for b{{.Sfx}} := range {{.Chan}} { - {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) - } -} diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.generated.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.generated.go deleted file mode 100644 index 2a7d1aab..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.generated.go +++ /dev/null @@ -1,343 +0,0 @@ -// comment this out // + build ignore - -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// Code generated from gen-helper.go.tmpl - DO NOT EDIT. - -package codec - -import ( - "encoding" - "reflect" -) - -// GenVersion is the current version of codecgen. -const GenVersion = 10 - -// This file is used to generate helper code for codecgen. -// The values here i.e. genHelper(En|De)coder are not to be used directly by -// library users. They WILL change continuously and without notice. -// -// To help enforce this, we create an unexported type with exported members. -// The only way to get the type is via the one exported type that we control (somewhat). -// -// When static codecs are created for types, they will use this value -// to perform encoding or decoding of primitives or known slice or map types. - -// GenHelperEncoder is exported so that it can be used externally by codecgen. -// -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) { - ge = genHelperEncoder{e: e} - ee = genHelperEncDriver{encDriver: e.e} - return -} - -// GenHelperDecoder is exported so that it can be used externally by codecgen. -// -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. 
-func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) { - gd = genHelperDecoder{d: d} - dd = genHelperDecDriver{decDriver: d.d} - return -} - -type genHelperEncDriver struct { - encDriver -} - -func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {} -func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) { - encStructFieldKey(s, x.encDriver, nil, keyType, false, false) -} -func (x genHelperEncDriver) EncodeSymbol(s string) { - x.encDriver.EncodeStringEnc(cUTF8, s) -} - -type genHelperDecDriver struct { - decDriver - C checkOverflow -} - -func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {} -func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte { - return decStructFieldKey(x.decDriver, keyType, buf) -} -func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) { - return x.C.IntV(x.decDriver.DecodeInt64(), bitsize) -} -func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - return x.C.UintV(x.decDriver.DecodeUint64(), bitsize) -} -func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - f = x.DecodeFloat64() - if chkOverflow32 && chkOvf.Float32(f) { - panicv.errorf("float32 overflow: %v", f) - } - return -} -func (x genHelperDecDriver) DecodeFloat32As64() (f float64) { - f = x.DecodeFloat64() - if chkOvf.Float32(f) { - panicv.errorf("float32 overflow: %v", f) - } - return -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperEncoder struct { - M must - e *Encoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperDecoder struct { - C checkOverflow - d *Decoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBasicHandle() *BasicHandle { - return f.e.h -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinary() bool { - return f.e.be // f.e.hh.isBinaryEncoding() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) IsJSONHandle() bool { - return f.e.js -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncFallback(iv interface{}) { - // println(">>>>>>>>> EncFallback") - // f.e.encodeI(iv, false, false) - f.e.encodeValue(reflect.ValueOf(iv), nil, false) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { - bs, fnerr := iv.MarshalText() - f.e.marshalUtf8(bs, fnerr) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { - bs, fnerr := iv.MarshalJSON() - f.e.marshalAsis(bs, fnerr) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { - bs, fnerr := iv.MarshalBinary() - f.e.marshalRaw(bs, fnerr) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -// -// Deprecated: builtin no longer supported - so we make this method a no-op, -// but leave in-place so that old generated files continue to work without regeneration. 
-func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return } - -// func (f genHelperEncoder) TimeRtidIfBinc() uintptr { -// if _, ok := f.e.hh.(*BincHandle); ok { -// return timeTypId -// } -// } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) I2Rtid(v interface{}) uintptr { - return i2rtid(v) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { - return f.e.h.getExt(rtid) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) { - f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) WriteStr(s string) { - f.e.w.writestr(s) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) BytesView(v string) []byte { return bytesView(v) } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -// -// Deprecated: No longer used, -// but leave in-place so that old generated files continue to work without regeneration. -func (f genHelperEncoder) HasExtensions() bool { - return len(f.e.h.extHandle) != 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -// -// Deprecated: No longer used, -// but leave in-place so that old generated files continue to work without regeneration. -func (f genHelperEncoder) EncExt(v interface{}) (r bool) { - if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil { - f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) - return true - } - return false -} - -// ---------------- DECODER FOLLOWS ----------------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBasicHandle() *BasicHandle { - return f.d.h -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinary() bool { - return f.d.be // f.d.hh.isBinaryEncoding() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSwallow() { f.d.swallow() } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecScratchBuffer() []byte { - return f.d.b[:] -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte { - return &f.d.b -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { - // println(">>>>>>>>> DecFallback") - rv := reflect.ValueOf(iv) - if chkPtr { - rv = f.d.ensureDecodeable(rv) - } - f.d.decodeValue(rv, nil, false) - // f.d.decodeValueFallback(rv) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { - return f.d.decSliceHelperStart() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { - f.d.structFieldNotFound(index, name) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { - f.d.arrayCannotExpand(sliceLen, streamLen) -} - -// FOR USE BY CODECGEN ONLY. 
IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { - fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) - if fnerr != nil { - panic(fnerr) - } -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { - // bs := f.dd.DecodeStringAsBytes() - // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. - fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) - if fnerr != nil { - panic(fnerr) - } -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { - fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) - if fnerr != nil { - panic(fnerr) - } -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -// -// Deprecated: builtin no longer supported - so we make this method a no-op, -// but leave in-place so that old generated files continue to work without regeneration. -func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return } - -// func (f genHelperDecoder) TimeRtidIfBinc() uintptr { -// // Note: builtin is no longer supported - so make this a no-op -// if _, ok := f.d.hh.(*BincHandle); ok { -// return timeTypId -// } -// return 0 -// } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) IsJSONHandle() bool { - return f.d.js -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) I2Rtid(v interface{}) uintptr { - return i2rtid(v) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { - return f.d.h.getExt(rtid) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) { - f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -// -// Deprecated: No longer used, -// but leave in-place so that old generated files continue to work without regeneration. -func (f genHelperDecoder) HasExtensions() bool { - return len(f.d.h.extHandle) != 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -// -// Deprecated: No longer used, -// but leave in-place so that old generated files continue to work without regeneration. -func (f genHelperDecoder) DecExt(v interface{}) (r bool) { - if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil { - f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) - return true - } - return false -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { - return decInferLen(clen, maxlen, unit) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -// -// Deprecated: no longer used, -// but leave in-place so that old generated files continue to work without regeneration. 
-func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) } diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.go.tmpl b/src/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.go.tmpl deleted file mode 100644 index f5d0634e..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.go.tmpl +++ /dev/null @@ -1,308 +0,0 @@ -// comment this out // + build ignore - -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// Code generated from gen-helper.go.tmpl - DO NOT EDIT. - -package codec - -import ( - "encoding" - "reflect" -) - -// GenVersion is the current version of codecgen. -const GenVersion = {{ .Version }} - -// This file is used to generate helper code for codecgen. -// The values here i.e. genHelper(En|De)coder are not to be used directly by -// library users. They WILL change continuously and without notice. -// -// To help enforce this, we create an unexported type with exported members. -// The only way to get the type is via the one exported type that we control (somewhat). -// -// When static codecs are created for types, they will use this value -// to perform encoding or decoding of primitives or known slice or map types. - -// GenHelperEncoder is exported so that it can be used externally by codecgen. -// -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) { - ge = genHelperEncoder{e: e} - ee = genHelperEncDriver{encDriver: e.e} - return -} - -// GenHelperDecoder is exported so that it can be used externally by codecgen. -// -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) { - gd = genHelperDecoder{d: d} - dd = genHelperDecDriver{decDriver: d.d} - return -} - -type genHelperEncDriver struct { - encDriver -} - -func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {} -func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) { - encStructFieldKey(s, x.encDriver, nil, keyType, false, false) -} -func (x genHelperEncDriver) EncodeSymbol(s string) { - x.encDriver.EncodeStringEnc(cUTF8, s) -} - -type genHelperDecDriver struct { - decDriver - C checkOverflow -} - -func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {} -func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte { - return decStructFieldKey(x.decDriver, keyType, buf) -} -func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) { - return x.C.IntV(x.decDriver.DecodeInt64(), bitsize) -} -func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - return x.C.UintV(x.decDriver.DecodeUint64(), bitsize) -} -func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - f = x.DecodeFloat64() - if chkOverflow32 && chkOvf.Float32(f) { - panicv.errorf("float32 overflow: %v", f) - } - return -} -func (x genHelperDecDriver) DecodeFloat32As64() (f float64) { - f = x.DecodeFloat64() - if chkOvf.Float32(f) { - panicv.errorf("float32 overflow: %v", f) - } - return -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperEncoder struct { - M must - e *Encoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -type genHelperDecoder struct { - C checkOverflow - d *Decoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBasicHandle() *BasicHandle { - return f.e.h -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinary() bool { - return f.e.be // f.e.hh.isBinaryEncoding() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) IsJSONHandle() bool { - return f.e.js -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncFallback(iv interface{}) { - // println(">>>>>>>>> EncFallback") - // f.e.encodeI(iv, false, false) - f.e.encodeValue(reflect.ValueOf(iv), nil, false) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { - bs, fnerr := iv.MarshalText() - f.e.marshalUtf8(bs, fnerr) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { - bs, fnerr := iv.MarshalJSON() - f.e.marshalAsis(bs, fnerr) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { - bs, fnerr := iv.MarshalBinary() - f.e.marshalRaw(bs, fnerr) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -// -// Deprecated: builtin no longer supported - so we make this method a no-op, -// but leave in-place so that old generated files continue to work without regeneration. -func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return } -// func (f genHelperEncoder) TimeRtidIfBinc() uintptr { -// if _, ok := f.e.hh.(*BincHandle); ok { -// return timeTypId -// } -// } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) I2Rtid(v interface{}) uintptr { - return i2rtid(v) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { - return f.e.h.getExt(rtid) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) { - f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) WriteStr(s string) { - f.e.w.writestr(s) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) BytesView(v string) []byte { return bytesView(v) } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -// -// Deprecated: No longer used, -// but leave in-place so that old generated files continue to work without regeneration. -func (f genHelperEncoder) HasExtensions() bool { - return len(f.e.h.extHandle) != 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -// -// Deprecated: No longer used, -// but leave in-place so that old generated files continue to work without regeneration. 
-func (f genHelperEncoder) EncExt(v interface{}) (r bool) { - if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil { - f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) - return true - } - return false -} - -// ---------------- DECODER FOLLOWS ----------------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBasicHandle() *BasicHandle { - return f.d.h -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinary() bool { - return f.d.be // f.d.hh.isBinaryEncoding() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSwallow() { f.d.swallow() } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecScratchBuffer() []byte { - return f.d.b[:] -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte { - return &f.d.b -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { - // println(">>>>>>>>> DecFallback") - rv := reflect.ValueOf(iv) - if chkPtr { - rv = f.d.ensureDecodeable(rv) - } - f.d.decodeValue(rv, nil, false) - // f.d.decodeValueFallback(rv) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { - return f.d.decSliceHelperStart() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { - f.d.structFieldNotFound(index, name) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { - f.d.arrayCannotExpand(sliceLen, streamLen) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { - fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { - // bs := f.dd.DecodeStringAsBytes() - // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. - fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { - fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -// -// Deprecated: builtin no longer supported - so we make this method a no-op, -// but leave in-place so that old generated files continue to work without regeneration. -func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return } -// func (f genHelperDecoder) TimeRtidIfBinc() uintptr { -// // Note: builtin is no longer supported - so make this a no-op -// if _, ok := f.d.hh.(*BincHandle); ok { -// return timeTypId -// } -// return 0 -// } - -// FOR USE BY CODECGEN ONLY. 
IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) IsJSONHandle() bool { - return f.d.js -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) I2Rtid(v interface{}) uintptr { - return i2rtid(v) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { - return f.d.h.getExt(rtid) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) { - f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -// -// Deprecated: No longer used, -// but leave in-place so that old generated files continue to work without regeneration. -func (f genHelperDecoder) HasExtensions() bool { - return len(f.d.h.extHandle) != 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -// -// Deprecated: No longer used, -// but leave in-place so that old generated files continue to work without regeneration. -func (f genHelperDecoder) DecExt(v interface{}) (r bool) { - if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil { - f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) - return true - } - return false -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { - return decInferLen(clen, maxlen, unit) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -// -// Deprecated: no longer used, -// but leave in-place so that old generated files continue to work without regeneration. -func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) } - diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/gen.generated.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/gen.generated.go deleted file mode 100644 index 8b00090a..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/gen.generated.go +++ /dev/null @@ -1,164 +0,0 @@ -// +build codecgen.exec - -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl - -const genDecMapTmpl = ` -{{var "v"}} := *{{ .Varname }} -{{var "l"}} := r.ReadMapStart() -{{var "bh"}} := z.DecBasicHandle() -if {{var "v"}} == nil { - {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) - {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) - *{{ .Varname }} = {{var "v"}} -} -var {{var "mk"}} {{ .KTyp }} -var {{var "mv"}} {{ .Typ }} -var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool -if {{var "bh"}}.MapValueReset { - {{if decElemKindPtr}}{{var "mg"}} = true - {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } - {{else if not decElemKindImmutable}}{{var "mg"}} = true - {{end}} } -if {{var "l"}} != 0 { -{{var "hl"}} := {{var "l"}} > 0 - for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { - r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. 
*/}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true{{end}} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} - {{var "mdn"}} = false - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} - if {{var "mdn"}} { - if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } - } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} // else len==0: TODO: Should we clear map entries? -r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}} -` - -const genDecListTmpl = ` -{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} -{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} -var {{var "c"}} bool {{/* // changed */}} -_ = {{var "c"}}{{end}} -if {{var "l"}} == 0 { - {{if isSlice }}if {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - } else if len({{var "v"}}) != 0 { - {{var "v"}} = {{var "v"}}[:0] - {{var "c"}} = true - } {{else if isChan }}if {{var "v"}} == nil { - {{var "v"}} = make({{ .CTyp }}, 0) - {{var "c"}} = true - } {{end}} -} else { - {{var "hl"}} := {{var "l"}} > 0 - var {{var "rl"}} int - _ = {{var "rl"}} - {{if isSlice }} if {{var "hl"}} { - if {{var "l"}} > cap({{var "v"}}) { - {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - if {{var "rl"}} <= cap({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "rl"}}] - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - {{var "c"}} = true - } else if {{var "l"}} != len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "l"}}] - {{var "c"}} = true - } - } {{end}} - var {{var "j"}} int - // var {{var "dn"}} bool - for {{var "j"}} = 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { // bounds-check-elimination - {{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil { - if {{var "hl"}} { - {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - } else { - {{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}} - } - {{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}}) - {{var "c"}} = true - }{{end}} - {{var "h"}}.ElemContainerState({{var "j"}}) - {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}} - {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }} - {{ decLineVar $x }} - {{var "v"}} <- {{ $x }} - // println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this - {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}} - var {{var "db"}} bool - if {{var "j"}} >= len({{var "v"}}) { - {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}) - {{var "c"}} = true - {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true - {{end}} - } - if {{var "db"}} { - z.DecSwallow() - 
} else { - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - {{end}} - } - {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "j"}}] - {{var "c"}} = true - } else if {{var "j"}} == 0 && {{var "v"}} == nil { - {{var "v"}} = make([]{{ .Typ }}, 0) - {{var "c"}} = true - } {{end}} -} -{{var "h"}}.End() -{{if not isArray }}if {{var "c"}} { - *{{ .Varname }} = {{var "v"}} -}{{end}} -` - -const genEncChanTmpl = ` -{{.Label}}: -switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; { -case timeout{{.Sfx}} == 0: // only consume available - for { - select { - case b{{.Sfx}} := <-{{.Chan}}: - {{ .Slice }} = append({{.Slice}}, b{{.Sfx}}) - default: - break {{.Label}} - } - } -case timeout{{.Sfx}} > 0: // consume until timeout - tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}}) - for { - select { - case b{{.Sfx}} := <-{{.Chan}}: - {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) - case <-tt{{.Sfx}}.C: - // close(tt.C) - break {{.Label}} - } - } -default: // consume until close - for b{{.Sfx}} := range {{.Chan}} { - {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) - } -} -` diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/gen.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/gen.go deleted file mode 100644 index 74c4aa86..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/gen.go +++ /dev/null @@ -1,2149 +0,0 @@ -// +build codecgen.exec - -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "bytes" - "encoding/base64" - "errors" - "fmt" - "go/format" - "io" - "io/ioutil" - "math/rand" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "sync" - "text/template" - "time" - "unicode" - "unicode/utf8" -) - -// --------------------------------------------------- -// codecgen supports the full cycle of reflection-based codec: -// - RawExt -// - Raw -// - Extensions -// - (Binary|Text|JSON)(Unm|M)arshal -// - generic by-kind -// -// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type. -// In those areas, we try to only do reflection or interface-conversion when NECESSARY: -// - Extensions, only if Extensions are configured. -// -// However, codecgen doesn't support the following: -// - Canonical option. (codecgen IGNORES it currently) -// This is just because it has not been implemented. -// - MissingFielder implementation. -// If a type implements MissingFielder, it is completely ignored by codecgen. -// -// During encode/decode, Selfer takes precedence. -// A type implementing Selfer will know how to encode/decode itself statically. -// -// The following field types are supported: -// array: [n]T -// slice: []T -// map: map[K]V -// primitive: [u]int[n], float(32|64), bool, string -// struct -// -// --------------------------------------------------- -// Note that a Selfer cannot call (e|d).(En|De)code on itself, -// as this will cause a circular reference, as (En|De)code will call Selfer methods. -// Any type that implements Selfer must implement completely and not fallback to (En|De)code. -// -// In addition, code in this file manages the generation of fast-path implementations of -// encode/decode of slices/maps of primitive keys/values. -// -// Users MUST re-generate their implementations whenever the code shape changes. -// The generated code will panic if it was generated with a version older than the supporting library. 
-// --------------------------------------------------- -// -// codec framework is very feature rich. -// When encoding or decoding into an interface, it depends on the runtime type of the interface. -// The type of the interface may be a named type, an extension, etc. -// Consequently, we fallback to runtime codec for encoding/decoding interfaces. -// In addition, we fallback for any value which cannot be guaranteed at runtime. -// This allows us support ANY value, including any named types, specifically those which -// do not implement our interfaces (e.g. Selfer). -// -// This explains some slowness compared to other code generation codecs (e.g. msgp). -// This reduction in speed is only seen when your refers to interfaces, -// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} } -// -// codecgen will panic if the file was generated with an old version of the library in use. -// -// Note: -// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil. -// This way, there isn't a function call overhead just to see that we should not enter a block of code. -// -// Note: -// codecgen-generated code depends on the variables defined by fast-path.generated.go. -// consequently, you cannot run with tags "codecgen notfastpath". - -// GenVersion is the current version of codecgen. -// -// NOTE: Increment this value each time codecgen changes fundamentally. -// Fundamental changes are: -// - helper methods change (signature change, new ones added, some removed, etc) -// - codecgen command line changes -// -// v1: Initial Version -// v2: -// v3: Changes for Kubernetes: -// changes in signature of some unpublished helper methods and codecgen cmdline arguments. -// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen) -// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections. -// v6: removed unsafe from gen, and now uses codecgen.exec tag -// v7: -// v8: current - we now maintain compatibility with old generated code. -// v9: skipped -// v10: modified encDriver and decDriver interfaces. Remove deprecated methods after Jan 1, 2019 -const genVersion = 10 - -const ( - genCodecPkg = "codec1978" - genTempVarPfx = "yy" - genTopLevelVarName = "x" - - // ignore canBeNil parameter, and always set to true. - // This is because nil can appear anywhere, so we should always check. - genAnythingCanBeNil = true - - // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function; - // else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals - // are not executed a lot. - // - // From testing, it didn't make much difference in runtime, so keep as true (one function only) - genUseOneFunctionForDecStructMap = true -) - -type genStructMapStyle uint8 - -const ( - genStructMapStyleConsolidated genStructMapStyle = iota - genStructMapStyleLenPrefix - genStructMapStyleCheckBreak -) - -var ( - errGenAllTypesSamePkg = errors.New("All types must be in the same package") - errGenExpectArrayOrMap = errors.New("unexpected type. 
Expecting array/map/slice") - - genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__") - genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) -) - -type genBuf struct { - buf []byte -} - -func (x *genBuf) s(s string) *genBuf { x.buf = append(x.buf, s...); return x } -func (x *genBuf) b(s []byte) *genBuf { x.buf = append(x.buf, s...); return x } -func (x *genBuf) v() string { return string(x.buf) } -func (x *genBuf) f(s string, args ...interface{}) { x.s(fmt.Sprintf(s, args...)) } -func (x *genBuf) reset() { - if x.buf != nil { - x.buf = x.buf[:0] - } -} - -// genRunner holds some state used during a Gen run. -type genRunner struct { - w io.Writer // output - c uint64 // counter used for generating varsfx - t []reflect.Type // list of types to run selfer on - - tc reflect.Type // currently running selfer on this type - te map[uintptr]bool // types for which the encoder has been created - td map[uintptr]bool // types for which the decoder has been created - cp string // codec import path - - im map[string]reflect.Type // imports to add - imn map[string]string // package names of imports to add - imc uint64 // counter for import numbers - - is map[reflect.Type]struct{} // types seen during import search - bp string // base PkgPath, for which we are generating for - - cpfx string // codec package prefix - - tm map[reflect.Type]struct{} // types for which enc/dec must be generated - ts []reflect.Type // types for which enc/dec must be generated - - xs string // top level variable/constant suffix - hn string // fn helper type name - - ti *TypeInfos - // rr *rand.Rand // random generator for file-specific types - - nx bool // no extensions -} - -// Gen will write a complete go file containing Selfer implementations for each -// type passed. All the types must be in the same package. -// -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE. -func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool, - ti *TypeInfos, typ ...reflect.Type) { - // All types passed to this method do not have a codec.Selfer method implemented directly. - // codecgen already checks the AST and skips any types that define the codec.Selfer methods. - // Consequently, there's no need to check and trim them if they implement codec.Selfer - - if len(typ) == 0 { - return - } - x := genRunner{ - w: w, - t: typ, - te: make(map[uintptr]bool), - td: make(map[uintptr]bool), - im: make(map[string]reflect.Type), - imn: make(map[string]string), - is: make(map[reflect.Type]struct{}), - tm: make(map[reflect.Type]struct{}), - ts: []reflect.Type{}, - bp: genImportPath(typ[0]), - xs: uid, - ti: ti, - nx: noExtensions, - } - if x.ti == nil { - x.ti = defTypeInfos - } - if x.xs == "" { - rr := rand.New(rand.NewSource(time.Now().UnixNano())) - x.xs = strconv.FormatInt(rr.Int63n(9999), 10) - } - - // gather imports first: - x.cp = genImportPath(reflect.TypeOf(x)) - x.imn[x.cp] = genCodecPkg - for _, t := range typ { - // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) - if genImportPath(t) != x.bp { - panic(errGenAllTypesSamePkg) - } - x.genRefPkgs(t) - } - if buildTags != "" { - x.line("// +build " + buildTags) - x.line("") - } - x.line(` - -// Code generated by codecgen - DO NOT EDIT. - -`) - x.line("package " + pkgName) - x.line("") - x.line("import (") - if x.cp != x.bp { - x.cpfx = genCodecPkg + "." 
- x.linef("%s \"%s\"", genCodecPkg, x.cp) - } - // use a sorted set of im keys, so that we can get consistent output - imKeys := make([]string, 0, len(x.im)) - for k := range x.im { - imKeys = append(imKeys, k) - } - sort.Strings(imKeys) - for _, k := range imKeys { // for k, _ := range x.im { - if k == x.imn[k] { - x.linef("\"%s\"", k) - } else { - x.linef("%s \"%s\"", x.imn[k], k) - } - } - // add required packages - for _, k := range [...]string{"runtime", "errors", "strconv"} { // "reflect", "fmt" - if _, ok := x.im[k]; !ok { - x.line("\"" + k + "\"") - } - } - x.line(")") - x.line("") - - x.line("const (") - x.linef("// ----- content types ----") - x.linef("codecSelferCcUTF8%s = %v", x.xs, int64(cUTF8)) - x.linef("codecSelferCcRAW%s = %v", x.xs, int64(cRAW)) - x.linef("// ----- value types used ----") - for _, vt := range [...]valueType{ - valueTypeArray, valueTypeMap, valueTypeString, - valueTypeInt, valueTypeUint, valueTypeFloat} { - x.linef("codecSelferValueType%s%s = %v", vt.String(), x.xs, int64(vt)) - } - - x.linef("codecSelferBitsize%s = uint8(32 << (^uint(0) >> 63))", x.xs) - x.line(")") - x.line("var (") - x.line("errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)") - x.line(")") - x.line("") - - x.hn = "codecSelfer" + x.xs - x.line("type " + x.hn + " struct{}") - x.line("") - - x.varsfxreset() - x.line("func init() {") - x.linef("if %sGenVersion != %v {", x.cpfx, genVersion) - x.line("_, file, _, _ := runtime.Caller(0)") - x.outf(`panic("codecgen version mismatch: current: %v, need " + strconv.FormatInt(int64(%sGenVersion), 10) + ". Re-generate file: " + file)`, genVersion, x.cpfx) - // x.out(`panic(fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `) - // x.linef(`%v, %sGenVersion, file))`, genVersion, x.cpfx) - x.linef("}") - x.line("if false { var _ byte = 0; // reference the types, but skip this branch at build/run time") - // x.line("_ = strconv.ParseInt") - var n int - // for k, t := range x.im { - for _, k := range imKeys { - t := x.im[k] - x.linef("var v%v %s.%s", n, x.imn[k], t.Name()) - n++ - } - if n > 0 { - x.out("_") - for i := 1; i < n; i++ { - x.out(", _") - } - x.out(" = v0") - for i := 1; i < n; i++ { - x.outf(", v%v", i) - } - } - x.line("} ") // close if false - x.line("}") // close init - x.line("") - - // generate rest of type info - for _, t := range typ { - x.tc = t - x.selfer(true) - x.selfer(false) - } - - for _, t := range x.ts { - rtid := rt2id(t) - // generate enc functions for all these slice/map types. - x.varsfxreset() - x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) - x.genRequiredMethodVars(true) - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - x.encListFallback("v", t) - case reflect.Map: - x.encMapFallback("v", t) - default: - panic(errGenExpectArrayOrMap) - } - x.line("}") - x.line("") - - // generate dec functions for all these slice/map types. 
- x.varsfxreset() - x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) - x.genRequiredMethodVars(false) - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - x.decListFallback("v", rtid, t) - case reflect.Map: - x.decMapFallback("v", rtid, t) - default: - panic(errGenExpectArrayOrMap) - } - x.line("}") - x.line("") - } - - x.line("") -} - -func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool { - // return varname != genTopLevelVarName && t != x.tc - // the only time we checkForSelfer is if we are not at the TOP of the generated code. - return varname != genTopLevelVarName -} - -func (x *genRunner) arr2str(t reflect.Type, s string) string { - if t.Kind() == reflect.Array { - return s - } - return "" -} - -func (x *genRunner) genRequiredMethodVars(encode bool) { - x.line("var h " + x.hn) - if encode { - x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)") - } else { - x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)") - } - x.line("_, _, _ = h, z, r") -} - -func (x *genRunner) genRefPkgs(t reflect.Type) { - if _, ok := x.is[t]; ok { - return - } - x.is[t] = struct{}{} - tpkg, tname := genImportPath(t), t.Name() - if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' { - if _, ok := x.im[tpkg]; !ok { - x.im[tpkg] = t - if idx := strings.LastIndex(tpkg, "/"); idx < 0 { - x.imn[tpkg] = tpkg - } else { - x.imc++ - x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false) - } - } - } - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan: - x.genRefPkgs(t.Elem()) - case reflect.Map: - x.genRefPkgs(t.Elem()) - x.genRefPkgs(t.Key()) - case reflect.Struct: - for i := 0; i < t.NumField(); i++ { - if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' { - x.genRefPkgs(t.Field(i).Type) - } - } - } -} - -func (x *genRunner) varsfx() string { - x.c++ - return strconv.FormatUint(x.c, 10) -} - -func (x *genRunner) varsfxreset() { - x.c = 0 -} - -func (x *genRunner) out(s string) { - _, err := io.WriteString(x.w, s) - if err != nil { - panic(err) - } -} - -func (x *genRunner) outf(s string, params ...interface{}) { - _, err := fmt.Fprintf(x.w, s, params...) - if err != nil { - panic(err) - } -} - -func (x *genRunner) line(s string) { - x.out(s) - if len(s) == 0 || s[len(s)-1] != '\n' { - x.out("\n") - } -} - -func (x *genRunner) linef(s string, params ...interface{}) { - x.outf(s, params...) - if len(s) == 0 || s[len(s)-1] != '\n' { - x.out("\n") - } -} - -func (x *genRunner) genTypeName(t reflect.Type) (n string) { - // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }() - - // if the type has a PkgPath, which doesn't match the current package, - // then include it. 
- // We cannot depend on t.String() because it includes current package, - // or t.PkgPath because it includes full import path, - // - var ptrPfx string - for t.Kind() == reflect.Ptr { - ptrPfx += "*" - t = t.Elem() - } - if tn := t.Name(); tn != "" { - return ptrPfx + x.genTypeNamePrim(t) - } - switch t.Kind() { - case reflect.Map: - return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem()) - case reflect.Slice: - return ptrPfx + "[]" + x.genTypeName(t.Elem()) - case reflect.Array: - return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem()) - case reflect.Chan: - return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem()) - default: - if t == intfTyp { - return ptrPfx + "interface{}" - } else { - return ptrPfx + x.genTypeNamePrim(t) - } - } -} - -func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) { - if t.Name() == "" { - return t.String() - } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) { - return t.Name() - } else { - return x.imn[genImportPath(t)] + "." + t.Name() - // return t.String() // best way to get the package name inclusive - } -} - -func (x *genRunner) genZeroValueR(t reflect.Type) string { - // if t is a named type, w - switch t.Kind() { - case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func, - reflect.Slice, reflect.Map, reflect.Invalid: - return "nil" - case reflect.Bool: - return "false" - case reflect.String: - return `""` - case reflect.Struct, reflect.Array: - return x.genTypeName(t) + "{}" - default: // all numbers - return "0" - } -} - -func (x *genRunner) genMethodNameT(t reflect.Type) (s string) { - return genMethodNameT(t, x.tc) -} - -func (x *genRunner) selfer(encode bool) { - t := x.tc - t0 := t - // always make decode use a pointer receiver, - // and structs/arrays always use a ptr receiver (encode|decode) - isptr := !encode || t.Kind() == reflect.Array || (t.Kind() == reflect.Struct && t != timeTyp) - x.varsfxreset() - - fnSigPfx := "func (" + genTopLevelVarName + " " - if isptr { - fnSigPfx += "*" - } - fnSigPfx += x.genTypeName(t) - x.out(fnSigPfx) - - if isptr { - t = reflect.PtrTo(t) - } - if encode { - x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {") - x.genRequiredMethodVars(true) - x.encVar(genTopLevelVarName, t) - } else { - x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - // do not use decVar, as there is no need to check TryDecodeAsNil - // or way to elegantly handle that, and also setting it to a - // non-nil value doesn't affect the pointer passed. 
- // x.decVar(genTopLevelVarName, t, false) - x.dec(genTopLevelVarName, t0, true) - } - x.line("}") - x.line("") - - if encode || t0.Kind() != reflect.Struct { - return - } - - // write is containerMap - if genUseOneFunctionForDecStructMap { - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleConsolidated) - x.line("}") - x.line("") - } else { - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleLenPrefix) - x.line("}") - x.line("") - - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleCheckBreak) - x.line("}") - x.line("") - } - - // write containerArray - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructArray(genTopLevelVarName, "l", "return", rt2id(t0), t0) - x.line("}") - x.line("") - -} - -// used for chan, array, slice, map -func (x *genRunner) xtraSM(varname string, t reflect.Type, encode, isptr bool) { - var ptrPfx, addrPfx string - if isptr { - ptrPfx = "*" - } else { - addrPfx = "&" - } - if encode { - x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), ptrPfx, x.genTypeName(t), varname) - } else { - x.linef("h.dec%s((*%s)(%s%s), d)", x.genMethodNameT(t), x.genTypeName(t), addrPfx, varname) - } - x.registerXtraT(t) -} - -func (x *genRunner) registerXtraT(t reflect.Type) { - // recursively register the types - if _, ok := x.tm[t]; ok { - return - } - var tkey reflect.Type - switch t.Kind() { - case reflect.Chan, reflect.Slice, reflect.Array: - case reflect.Map: - tkey = t.Key() - default: - return - } - x.tm[t] = struct{}{} - x.ts = append(x.ts, t) - // check if this refers to any xtra types eg. a slice of array: add the array - x.registerXtraT(t.Elem()) - if tkey != nil { - x.registerXtraT(tkey) - } -} - -// encVar will encode a variable. -// The parameter, t, is the reflect.Type of the variable itself -func (x *genRunner) encVar(varname string, t reflect.Type) { - // fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t) - var checkNil bool - switch t.Kind() { - case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan: - checkNil = true - } - if checkNil { - x.linef("if %s == nil { r.EncodeNil() } else { ", varname) - } - - switch t.Kind() { - case reflect.Ptr: - telem := t.Elem() - tek := telem.Kind() - if tek == reflect.Array || (tek == reflect.Struct && telem != timeTyp) { - x.enc(varname, genNonPtr(t)) - break - } - i := x.varsfx() - x.line(genTempVarPfx + i + " := *" + varname) - x.enc(genTempVarPfx+i, genNonPtr(t)) - case reflect.Struct, reflect.Array: - if t == timeTyp { - x.enc(varname, t) - break - } - i := x.varsfx() - x.line(genTempVarPfx + i + " := &" + varname) - x.enc(genTempVarPfx+i, t) - default: - x.enc(varname, t) - } - - if checkNil { - x.line("}") - } - -} - -// enc will encode a variable (varname) of type t, where t represents T. 
-// if t is !time.Time and t is of kind reflect.Struct or reflect.Array, varname is of type *T -// (to prevent copying), -// else t is of type T -func (x *genRunner) enc(varname string, t reflect.Type) { - rtid := rt2id(t) - ti2 := x.ti.get(rtid, t) - // We call CodecEncodeSelf if one of the following are honored: - // - the type already implements Selfer, call that - // - the type has a Selfer implementation just created, use that - // - the type is in the list of the ones we will generate for, but it is not currently being generated - - mi := x.varsfx() - // tptr := reflect.PtrTo(t) - tk := t.Kind() - if x.checkForSelfer(t, varname) { - if tk == reflect.Array || (tk == reflect.Struct && rtid != timeTypId) { // varname is of type *T - // if tptr.Implements(selferTyp) || t.Implements(selferTyp) { - if ti2.isFlag(typeInfoFlagIsZeroerPtr) || ti2.isFlag(typeInfoFlagIsZeroer) { - x.line(varname + ".CodecEncodeSelf(e)") - return - } - } else { // varname is of type T - if ti2.cs { // t.Implements(selferTyp) { - x.line(varname + ".CodecEncodeSelf(e)") - return - } else if ti2.csp { // tptr.Implements(selferTyp) { - x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname) - x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi) - return - } - } - - if _, ok := x.te[rtid]; ok { - x.line(varname + ".CodecEncodeSelf(e)") - return - } - } - - inlist := false - for _, t0 := range x.t { - if t == t0 { - inlist = true - if x.checkForSelfer(t, varname) { - x.line(varname + ".CodecEncodeSelf(e)") - return - } - break - } - } - - var rtidAdded bool - if t == x.tc { - x.te[rtid] = true - rtidAdded = true - } - - // check if - // - type is time.Time, RawExt, Raw - // - the type implements (Text|JSON|Binary)(Unm|M)arshal - - x.line("if false {") //start if block - defer func() { x.line("}") }() //end if block - - if t == timeTyp { - x.linef("} else if !z.EncBasicHandle().TimeNotBuiltin { r.EncodeTime(%s)", varname) - // return - } - if t == rawTyp { - x.linef("} else { z.EncRaw(%s)", varname) - return - } - if t == rawExtTyp { - x.linef("} else { r.EncodeRawExt(%s, e)", varname) - return - } - // only check for extensions if the type is named, and has a packagePath. 
- var arrayOrStruct = tk == reflect.Array || tk == reflect.Struct // meaning varname if of type *T - if !x.nx && genImportPath(t) != "" && t.Name() != "" { - yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi) - x.linef("} else if %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.EncExtension(%s, %s) ", yy, varname, yy, varname, yy) - } - if arrayOrStruct { // varname is of type *T - if ti2.bm || ti2.bmp { // t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) { - x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(%v) ", varname) - } - if ti2.jm || ti2.jmp { // t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) { - x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", varname) - } else if ti2.tm || ti2.tmp { // t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) { - x.linef("} else if !z.EncBinary() { z.EncTextMarshal(%v) ", varname) - } - } else { // varname is of type T - if ti2.bm { // t.Implements(binaryMarshalerTyp) { - x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(%v) ", varname) - } else if ti2.bmp { // tptr.Implements(binaryMarshalerTyp) { - x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(&%v) ", varname) - } - if ti2.jm { // t.Implements(jsonMarshalerTyp) { - x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", varname) - } else if ti2.jmp { // tptr.Implements(jsonMarshalerTyp) { - x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", varname) - } else if ti2.tm { // t.Implements(textMarshalerTyp) { - x.linef("} else if !z.EncBinary() { z.EncTextMarshal(%v) ", varname) - } else if ti2.tmp { // tptr.Implements(textMarshalerTyp) { - x.linef("} else if !z.EncBinary() { z.EncTextMarshal(&%v) ", varname) - } - } - x.line("} else {") - - switch t.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x.line("r.EncodeInt(int64(" + varname + "))") - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x.line("r.EncodeUint(uint64(" + varname + "))") - case reflect.Float32: - x.line("r.EncodeFloat32(float32(" + varname + "))") - case reflect.Float64: - x.line("r.EncodeFloat64(float64(" + varname + "))") - case reflect.Bool: - x.line("r.EncodeBool(bool(" + varname + "))") - case reflect.String: - x.linef("if z.EncBasicHandle().StringToRaw { r.EncodeStringBytesRaw(z.BytesView(string(%s))) } else { r.EncodeStringEnc(codecSelferCcUTF8%s, string(%s)) }", varname, x.xs, varname) - case reflect.Chan: - x.xtraSM(varname, t, true, false) - // x.encListFallback(varname, rtid, t) - case reflect.Array: - x.xtraSM(varname, t, true, true) - case reflect.Slice: - // if nil, call dedicated function - // if a []uint8, call dedicated function - // if a known fastpath slice, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - if rtid == uint8SliceTypId { - x.line("r.EncodeStringBytesRaw([]byte(" + varname + "))") - } else if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)") - } else { - x.xtraSM(varname, t, true, false) - // x.encListFallback(varname, rtid, t) - } - case reflect.Map: - // if nil, call dedicated function - // if a known fastpath map, call dedicated function - // else write encode function in-line. 
- // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ") - if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)") - } else { - x.xtraSM(varname, t, true, false) - // x.encMapFallback(varname, rtid, t) - } - case reflect.Struct: - if !inlist { - delete(x.te, rtid) - x.line("z.EncFallback(" + varname + ")") - break - } - x.encStruct(varname, rtid, t) - default: - if rtidAdded { - delete(x.te, rtid) - } - x.line("z.EncFallback(" + varname + ")") - } -} - -func (x *genRunner) encZero(t reflect.Type) { - switch t.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x.line("r.EncodeInt(0)") - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x.line("r.EncodeUint(0)") - case reflect.Float32: - x.line("r.EncodeFloat32(0)") - case reflect.Float64: - x.line("r.EncodeFloat64(0)") - case reflect.Bool: - x.line("r.EncodeBool(false)") - case reflect.String: - x.linef(`if z.EncBasicHandle().StringToRaw { r.EncodeStringBytesRaw([]byte{}) } else { r.EncodeStringEnc(codecSelferCcUTF8%s, "") }`, x.xs) - default: - x.line("r.EncodeNil()") - } -} - -func (x *genRunner) encOmitEmptyLine(t2 reflect.StructField, varname string, buf *genBuf) { - // smartly check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc. - // also, for maps/slices/arrays, check if len ! 0 (not if == zero value) - varname2 := varname + "." + t2.Name - switch t2.Type.Kind() { - case reflect.Struct: - rtid2 := rt2id(t2.Type) - ti2 := x.ti.get(rtid2, t2.Type) - // fmt.Printf(">>>> structfield: omitempty: type: %s, field: %s\n", t2.Type.Name(), t2.Name) - if ti2.rtid == timeTypId { - buf.s("!(").s(varname2).s(".IsZero())") - break - } - if ti2.isFlag(typeInfoFlagIsZeroerPtr) || ti2.isFlag(typeInfoFlagIsZeroer) { - buf.s("!(").s(varname2).s(".IsZero())") - break - } - if ti2.isFlag(typeInfoFlagComparable) { - buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type)) - break - } - // buf.s("(") - buf.s("false") - for i, n := 0, t2.Type.NumField(); i < n; i++ { - f := t2.Type.Field(i) - if f.PkgPath != "" { // unexported - continue - } - buf.s(" || ") - x.encOmitEmptyLine(f, varname2, buf) - } - //buf.s(")") - case reflect.Bool: - buf.s(varname2) - case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan: - buf.s("len(").s(varname2).s(") != 0") - default: - buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type)) - } -} - -func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { - // Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. ) - // replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it - - // if t === type currently running selfer on, do for all - ti := x.ti.get(rtid, t) - i := x.varsfx() - sepVarname := genTempVarPfx + "sep" + i - numfieldsvar := genTempVarPfx + "q" + i - ti2arrayvar := genTempVarPfx + "r" + i - struct2arrvar := genTempVarPfx + "2arr" + i - - x.line(sepVarname + " := !z.EncBinary()") - x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar) - x.linef("_, _ = %s, %s", sepVarname, struct2arrvar) - x.linef("const %s bool = %v // struct tag has 'toArray'", ti2arrayvar, ti.toArray) - - tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing. 
- - // var nn int - // due to omitEmpty, we need to calculate the - // number of non-empty things we write out first. - // This is required as we need to pre-determine the size of the container, - // to support length-prefixing. - if ti.anyOmitEmpty { - x.linef("var %s = [%v]bool{ // should field at this index be written?", numfieldsvar, len(tisfi)) - - for j, si := range tisfi { - _ = j - if !si.omitEmpty() { - // x.linef("%s[%v] = true // %s", numfieldsvar, j, si.fieldName) - x.linef("true, // %s", si.fieldName) - // nn++ - continue - } - var t2 reflect.StructField - var omitline genBuf - { - t2typ := t - varname3 := varname - // go through the loop, record the t2 field explicitly, - // and gather the omit line if embedded in pointers. - for ij, ix := range si.is { - if uint8(ij) == si.nis { - break - } - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(int(ix)) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - // do not include actual field in the omit line. - // that is done subsequently (right after - below). - if uint8(ij+1) < si.nis && t2typ.Kind() == reflect.Ptr { - omitline.s(varname3).s(" != nil && ") - } - } - } - x.encOmitEmptyLine(t2, varname, &omitline) - x.linef("%s, // %s", omitline.v(), si.fieldName) - } - x.line("}") - x.linef("_ = %s", numfieldsvar) - } - // x.linef("var %snn%s int", genTempVarPfx, i) - x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { - x.linef("r.WriteArrayStart(%d)", len(tisfi)) - x.linef("} else {") // if not ti.toArray - if ti.anyOmitEmpty { - // nn = 0 - // x.linef("var %snn%s = %v", genTempVarPfx, i, nn) - x.linef("var %snn%s int", genTempVarPfx, i) - x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i) - x.linef("r.WriteMapStart(%snn%s)", genTempVarPfx, i) - x.linef("%snn%s = %v", genTempVarPfx, i, 0) - } else { - x.linef("r.WriteMapStart(%d)", len(tisfi)) - } - x.line("}") // close if not StructToArray - - for j, si := range tisfi { - i := x.varsfx() - isNilVarName := genTempVarPfx + "n" + i - var labelUsed bool - var t2 reflect.StructField - { - t2typ := t - varname3 := varname - for ij, ix := range si.is { - if uint8(ij) == si.nis { - break - } - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(int(ix)) - t2typ = t2.Type - varname3 = varname3 + "." 
+ t2.Name - if t2typ.Kind() == reflect.Ptr { - if !labelUsed { - x.line("var " + isNilVarName + " bool") - } - x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ") - x.line("goto LABEL" + i) - x.line("}") - labelUsed = true - // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }") - } - } - // t2 = t.FieldByIndex(si.is) - } - if labelUsed { - x.line("LABEL" + i + ":") - } - // if the type of the field is a Selfer, or one of the ones - - x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray - if labelUsed { - x.linef("if %s { r.WriteArrayElem(); r.EncodeNil() } else { ", isNilVarName) - } - x.line("r.WriteArrayElem()") - if si.omitEmpty() { - x.linef("if %s[%v] {", numfieldsvar, j) - } - x.encVar(varname+"."+t2.Name, t2.Type) - if si.omitEmpty() { - x.linef("} else {") - x.encZero(t2.Type) - x.linef("}") - } - if labelUsed { - x.line("}") - } - - x.linef("} else {") // if not ti.toArray - - if si.omitEmpty() { - x.linef("if %s[%v] {", numfieldsvar, j) - } - x.line("r.WriteMapElemKey()") - - // emulate EncStructFieldKey - switch ti.keyType { - case valueTypeInt: - x.linef("r.EncodeInt(z.M.Int(strconv.ParseInt(`%s`, 10, 64)))", si.encName) - case valueTypeUint: - x.linef("r.EncodeUint(z.M.Uint(strconv.ParseUint(`%s`, 10, 64)))", si.encName) - case valueTypeFloat: - x.linef("r.EncodeFloat64(z.M.Float(strconv.ParseFloat(`%s`, 64)))", si.encName) - default: // string - if si.encNameAsciiAlphaNum { - x.linef(`if z.IsJSONHandle() { z.WriteStr("\"%s\"") } else { `, si.encName) - } - x.linef("r.EncodeStringEnc(codecSelferCcUTF8%s, `%s`)", x.xs, si.encName) - if si.encNameAsciiAlphaNum { - x.linef("}") - } - } - // x.linef("r.EncStructFieldKey(codecSelferValueType%s%s, `%s`)", ti.keyType.String(), x.xs, si.encName) - x.line("r.WriteMapElemValue()") - if labelUsed { - x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") - x.encVar(varname+"."+t2.Name, t2.Type) - x.line("}") - } else { - x.encVar(varname+"."+t2.Name, t2.Type) - } - if si.omitEmpty() { - x.line("}") - } - x.linef("} ") // end if/else ti.toArray - } - x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { - x.line("r.WriteArrayEnd()") - x.line("} else {") - x.line("r.WriteMapEnd()") - x.line("}") - -} - -func (x *genRunner) encListFallback(varname string, t reflect.Type) { - elemBytes := t.Elem().Kind() == reflect.Uint8 - if t.AssignableTo(uint8SliceTyp) { - x.linef("r.EncodeStringBytesRaw([]byte(%s))", varname) - return - } - if t.Kind() == reflect.Array && elemBytes { - x.linef("r.EncodeStringBytesRaw(((*[%d]byte)(%s))[:])", t.Len(), varname) - return - } - i := x.varsfx() - if t.Kind() == reflect.Chan { - type ts struct { - Label, Chan, Slice, Sfx string - } - tm, err := template.New("").Parse(genEncChanTmpl) - if err != nil { - panic(err) - } - x.linef("if %s == nil { r.EncodeNil() } else { ", varname) - x.linef("var sch%s []%s", i, x.genTypeName(t.Elem())) - err = tm.Execute(x.w, &ts{"Lsch" + i, varname, "sch" + i, i}) - if err != nil { - panic(err) - } - // x.linef("%s = sch%s", varname, i) - if elemBytes { - x.linef("r.EncodeStringBytesRaw([]byte(%s))", "sch"+i) - x.line("}") - return - } - varname = "sch" + i - } - - x.line("r.WriteArrayStart(len(" + varname + "))") - x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname) - x.line("r.WriteArrayElem()") - - x.encVar(genTempVarPfx+"v"+i, t.Elem()) - x.line("}") - x.line("r.WriteArrayEnd()") - if t.Kind() == reflect.Chan { - x.line("}") - } -} - -func (x *genRunner) encMapFallback(varname string, t reflect.Type) { - 
// TODO: expand this to handle canonical. - i := x.varsfx() - x.line("r.WriteMapStart(len(" + varname + "))") - x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) - x.line("r.WriteMapElemKey()") - x.encVar(genTempVarPfx+"k"+i, t.Key()) - x.line("r.WriteMapElemValue()") - x.encVar(genTempVarPfx+"v"+i, t.Elem()) - x.line("}") - x.line("r.WriteMapEnd()") -} - -func (x *genRunner) decVarInitPtr(varname, nilvar string, t reflect.Type, si *structFieldInfo, - newbuf, nilbuf *genBuf) (t2 reflect.StructField) { - //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. - // t2 = t.FieldByIndex(si.is) - t2typ := t - varname3 := varname - t2kind := t2typ.Kind() - var nilbufed bool - if si != nil { - for ij, ix := range si.is { - if uint8(ij) == si.nis { - break - } - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(int(ix)) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - t2kind = t2typ.Kind() - if t2kind != reflect.Ptr { - continue - } - if newbuf != nil { - newbuf.f("if %s == nil { %s = new(%s) }\n", varname3, varname3, x.genTypeName(t2typ.Elem())) - } - if nilbuf != nil { - if !nilbufed { - nilbuf.s("if true") - nilbufed = true - } - nilbuf.s(" && ").s(varname3).s(" != nil") - } - } - } - // if t2typ.Kind() == reflect.Ptr { - // varname3 = varname3 + t2.Name - // } - if nilbuf != nil { - if nilbufed { - nilbuf.s(" { ") - } - if nilvar != "" { - nilbuf.s(nilvar).s(" = true") - } else if tk := t2typ.Kind(); tk == reflect.Ptr { - if strings.IndexByte(varname3, '.') != -1 || strings.IndexByte(varname3, '[') != -1 { - nilbuf.s(varname3).s(" = nil") - } else { - nilbuf.s("*").s(varname3).s(" = ").s(x.genZeroValueR(t2typ.Elem())) - } - } else { - nilbuf.s(varname3).s(" = ").s(x.genZeroValueR(t2typ)) - } - if nilbufed { - nilbuf.s("}") - } - } - return t2 -} - -// decVar takes a variable called varname, of type t -func (x *genRunner) decVarMain(varname, rand string, t reflect.Type, checkNotNil bool) { - // We only encode as nil if a nillable value. - // This removes some of the wasted checks for TryDecodeAsNil. - // We need to think about this more, to see what happens if omitempty, etc - // cause a nil value to be stored when something is expected. - // This could happen when decoding from a struct encoded as an array. - // For that, decVar should be called with canNil=true, to force true as its value. - var varname2 string - if t.Kind() != reflect.Ptr { - if t.PkgPath() != "" || !x.decTryAssignPrimitive(varname, t, false) { - x.dec(varname, t, false) - } - } else { - if checkNotNil { - x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem())) - } - // Ensure we set underlying ptr to a non-nil value (so we can deref to it later). - // There's a chance of a **T in here which is nil. - var ptrPfx string - for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() { - ptrPfx += "*" - if checkNotNil { - x.linef("if %s%s == nil { %s%s = new(%s)}", - ptrPfx, varname, ptrPfx, varname, x.genTypeName(t)) - } - } - // Should we create temp var if a slice/map indexing? No. dec(...) can now handle it. 
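Aside (illustration, not part of the vendored file): the pointer handling above means that for a struct field declared as F *T, with checkNotNil set, decVarMain emits code of roughly this shape (temp-variable names here are invented; the real generator builds them from genTempVarPfx and varsfx()):

	if x.F == nil { x.F = new(T) }
	// dec("x.F", T, isptr=true) then decodes directly through the pointer.

For deeper chains such as F **T, the loop above first makes every pointer level non-nil, and the extra indirection is then unwrapped into the "z"-prefixed temporary emitted just below, so that dec(...) only ever sees a single level of indirection.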
- - if ptrPfx == "" { - x.dec(varname, t, true) - } else { - varname2 = genTempVarPfx + "z" + rand - x.line(varname2 + " := " + ptrPfx + varname) - x.dec(varname2, t, true) - } - } -} - -// decVar takes a variable called varname, of type t -func (x *genRunner) decVar(varname, nilvar string, t reflect.Type, canBeNil, checkNotNil bool) { - i := x.varsfx() - - // We only encode as nil if a nillable value. - // This removes some of the wasted checks for TryDecodeAsNil. - // We need to think about this more, to see what happens if omitempty, etc - // cause a nil value to be stored when something is expected. - // This could happen when decoding from a struct encoded as an array. - // For that, decVar should be called with canNil=true, to force true as its value. - - if !canBeNil { - canBeNil = genAnythingCanBeNil || !genIsImmutable(t) - } - - if canBeNil { - var buf genBuf - x.decVarInitPtr(varname, nilvar, t, nil, nil, &buf) - x.linef("if r.TryDecodeAsNil() { %s } else {", buf.buf) - } else { - x.line("// cannot be nil") - } - - x.decVarMain(varname, i, t, checkNotNil) - - if canBeNil { - x.line("} ") - } -} - -// dec will decode a variable (varname) of type t or ptrTo(t) if isptr==true. -// t is always a basetype (i.e. not of kind reflect.Ptr). -func (x *genRunner) dec(varname string, t reflect.Type, isptr bool) { - // assumptions: - // - the varname is to a pointer already. No need to take address of it - // - t is always a baseType T (not a *T, etc). - rtid := rt2id(t) - ti2 := x.ti.get(rtid, t) - // tptr := reflect.PtrTo(t) - if x.checkForSelfer(t, varname) { - if ti2.cs || ti2.csp { // t.Implements(selferTyp) || tptr.Implements(selferTyp) { - x.line(varname + ".CodecDecodeSelf(d)") - return - } - if _, ok := x.td[rtid]; ok { - x.line(varname + ".CodecDecodeSelf(d)") - return - } - } - - inlist := false - for _, t0 := range x.t { - if t == t0 { - inlist = true - if x.checkForSelfer(t, varname) { - x.line(varname + ".CodecDecodeSelf(d)") - return - } - break - } - } - - var rtidAdded bool - if t == x.tc { - x.td[rtid] = true - rtidAdded = true - } - - // check if - // - type is time.Time, Raw, RawExt - // - the type implements (Text|JSON|Binary)(Unm|M)arshal - - mi := x.varsfx() - // x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi) - // x.linef("_ = %sm%s", genTempVarPfx, mi) - x.line("if false {") //start if block - defer func() { x.line("}") }() //end if block - - var ptrPfx, addrPfx string - if isptr { - ptrPfx = "*" - } else { - addrPfx = "&" - } - if t == timeTyp { - x.linef("} else if !z.DecBasicHandle().TimeNotBuiltin { %s%v = r.DecodeTime()", ptrPfx, varname) - // return - } - if t == rawTyp { - x.linef("} else { %s%v = z.DecRaw()", ptrPfx, varname) - return - } - - if t == rawExtTyp { - x.linef("} else { r.DecodeExt(%s%v, 0, nil)", addrPfx, varname) - return - } - - // only check for extensions if the type is named, and has a packagePath. 
- if !x.nx && genImportPath(t) != "" && t.Name() != "" { - // first check if extensions are configued, before doing the interface conversion - // x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname) - yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi) - x.linef("} else if %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.DecExtension(%s, %s) ", yy, varname, yy, varname, yy) - } - - if ti2.bu || ti2.bup { // t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) { - x.linef("} else if z.DecBinary() { z.DecBinaryUnmarshal(%s%v) ", addrPfx, varname) - } - if ti2.ju || ti2.jup { // t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) { - x.linef("} else if !z.DecBinary() && z.IsJSONHandle() { z.DecJSONUnmarshal(%s%v)", addrPfx, varname) - } else if ti2.tu || ti2.tup { // t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) { - x.linef("} else if !z.DecBinary() { z.DecTextUnmarshal(%s%v)", addrPfx, varname) - } - - x.line("} else {") - - if x.decTryAssignPrimitive(varname, t, isptr) { - return - } - - switch t.Kind() { - case reflect.Array, reflect.Chan: - x.xtraSM(varname, t, false, isptr) - case reflect.Slice: - // if a []uint8, call dedicated function - // if a known fastpath slice, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - if rtid == uint8SliceTypId { - x.linef("%s%s = r.DecodeBytes(%s(%s[]byte)(%s), false)", - ptrPfx, varname, ptrPfx, ptrPfx, varname) - } else if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname) - } else { - x.xtraSM(varname, t, false, isptr) - // x.decListFallback(varname, rtid, false, t) - } - case reflect.Map: - // if a known fastpath map, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname) - } else { - x.xtraSM(varname, t, false, isptr) - // x.decMapFallback(varname, rtid, t) - } - case reflect.Struct: - if inlist { - // no need to create temp variable if isptr, or x.F or x[F] - if isptr || strings.IndexByte(varname, '.') != -1 || strings.IndexByte(varname, '[') != -1 { - x.decStruct(varname, rtid, t) - } else { - varname2 := genTempVarPfx + "j" + mi - x.line(varname2 + " := &" + varname) - x.decStruct(varname2, rtid, t) - } - } else { - // delete(x.td, rtid) - x.line("z.DecFallback(" + addrPfx + varname + ", false)") - } - default: - if rtidAdded { - delete(x.te, rtid) - } - x.line("z.DecFallback(" + addrPfx + varname + ", true)") - } -} - -func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type, isptr bool) (done bool) { - // This should only be used for exact primitives (ie un-named types). - // Named types may be implementations of Selfer, Unmarshaler, etc. - // They should be handled by dec(...) 
- - var ptr string - if isptr { - ptr = "*" - } - switch t.Kind() { - case reflect.Int: - x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) - case reflect.Int8: - x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 8))", ptr, varname, x.genTypeName(t)) - case reflect.Int16: - x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 16))", ptr, varname, x.genTypeName(t)) - case reflect.Int32: - x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 32))", ptr, varname, x.genTypeName(t)) - case reflect.Int64: - x.linef("%s%s = (%s)(r.DecodeInt64())", ptr, varname, x.genTypeName(t)) - - case reflect.Uint: - x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) - case reflect.Uint8: - x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 8))", ptr, varname, x.genTypeName(t)) - case reflect.Uint16: - x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 16))", ptr, varname, x.genTypeName(t)) - case reflect.Uint32: - x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 32))", ptr, varname, x.genTypeName(t)) - case reflect.Uint64: - x.linef("%s%s = (%s)(r.DecodeUint64())", ptr, varname, x.genTypeName(t)) - case reflect.Uintptr: - x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) - - case reflect.Float32: - x.linef("%s%s = (%s)(r.DecodeFloat32As64())", ptr, varname, x.genTypeName(t)) - case reflect.Float64: - x.linef("%s%s = (%s)(r.DecodeFloat64())", ptr, varname, x.genTypeName(t)) - - case reflect.Bool: - x.linef("%s%s = (%s)(r.DecodeBool())", ptr, varname, x.genTypeName(t)) - case reflect.String: - x.linef("%s%s = (%s)(r.DecodeString())", ptr, varname, x.genTypeName(t)) - default: - return false - } - return true -} - -func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { - if t.AssignableTo(uint8SliceTyp) { - x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false)") - return - } - if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { - x.linef("r.DecodeBytes( ((*[%d]byte)(%s))[:], true)", t.Len(), varname) - return - } - type tstruc struct { - TempVar string - Rand string - Varname string - CTyp string - Typ string - Immutable bool - Size int - } - telem := t.Elem() - ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())} - - funcs := make(template.FuncMap) - - funcs["decLineVar"] = func(varname string) string { - x.decVar(varname, "", telem, false, true) - return "" - } - funcs["var"] = func(s string) string { - return ts.TempVar + s + ts.Rand - } - funcs["zero"] = func() string { - return x.genZeroValueR(telem) - } - funcs["isArray"] = func() bool { - return t.Kind() == reflect.Array - } - funcs["isSlice"] = func() bool { - return t.Kind() == reflect.Slice - } - funcs["isChan"] = func() bool { - return t.Kind() == reflect.Chan - } - tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl) - if err != nil { - panic(err) - } - if err = tm.Execute(x.w, &ts); err != nil { - panic(err) - } -} - -func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) { - type tstruc struct { - TempVar string - Sfx string - Rand string - Varname string - KTyp string - Typ string - Size int - } - telem := t.Elem() - tkey := t.Key() - ts := tstruc{ - genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey), - x.genTypeName(telem), int(telem.Size() + tkey.Size()), - } - - funcs := make(template.FuncMap) 
- funcs["decElemZero"] = func() string { - return x.genZeroValueR(telem) - } - funcs["decElemKindImmutable"] = func() bool { - return genIsImmutable(telem) - } - funcs["decElemKindPtr"] = func() bool { - return telem.Kind() == reflect.Ptr - } - funcs["decElemKindIntf"] = func() bool { - return telem.Kind() == reflect.Interface - } - funcs["decLineVarK"] = func(varname string) string { - x.decVar(varname, "", tkey, false, true) - return "" - } - funcs["decLineVar"] = func(varname, decodedNilVarname string) string { - x.decVar(varname, decodedNilVarname, telem, false, true) - return "" - } - funcs["var"] = func(s string) string { - return ts.TempVar + s + ts.Rand - } - - tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl) - if err != nil { - panic(err) - } - if err = tm.Execute(x.w, &ts); err != nil { - panic(err) - } -} - -func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) { - ti := x.ti.get(rtid, t) - tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing. - x.line("switch (" + kName + ") {") - var newbuf, nilbuf genBuf - for _, si := range tisfi { - x.line("case \"" + si.encName + "\":") - newbuf.reset() - nilbuf.reset() - t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf) - x.linef("if r.TryDecodeAsNil() { %s } else { %s", nilbuf.buf, newbuf.buf) - x.decVarMain(varname+"."+t2.Name, x.varsfx(), t2.Type, false) - x.line("}") - } - x.line("default:") - // pass the slice here, so that the string will not escape, and maybe save allocation - x.line("z.DecStructFieldNotFound(-1, " + kName + ")") - x.line("} // end switch " + kName) -} - -func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) { - tpfx := genTempVarPfx - ti := x.ti.get(rtid, t) - i := x.varsfx() - kName := tpfx + "s" + i - - switch style { - case genStructMapStyleLenPrefix: - x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i) - case genStructMapStyleCheckBreak: - x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i) - default: // 0, otherwise. - x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length - x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i) - x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname) - x.line("} else { if r.CheckBreak() { break }; }") - } - x.line("r.ReadMapElemKey()") - - // emulate decstructfieldkey - switch ti.keyType { - case valueTypeInt: - x.linef("%s := z.StringView(strconv.AppendInt(z.DecScratchArrayBuffer()[:0], r.DecodeInt64(), 10))", kName) - case valueTypeUint: - x.linef("%s := z.StringView(strconv.AppendUint(z.DecScratchArrayBuffer()[:0], r.DecodeUint64(), 10))", kName) - case valueTypeFloat: - x.linef("%s := z.StringView(strconv.AppendFloat(z.DecScratchArrayBuffer()[:0], r.DecodeFloat64(), 'f', -1, 64))", kName) - default: // string - x.linef("%s := z.StringView(r.DecodeStringAsBytes())", kName) - } - // x.linef("%s := z.StringView(r.DecStructFieldKey(codecSelferValueType%s%s, z.DecScratchArrayBuffer()))", kName, ti.keyType.String(), x.xs) - - x.line("r.ReadMapElemValue()") - x.decStructMapSwitch(kName, varname, rtid, t) - - x.line("} // end for " + tpfx + "j" + i) - x.line("r.ReadMapEnd()") -} - -func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) { - tpfx := genTempVarPfx - i := x.varsfx() - ti := x.ti.get(rtid, t) - tisfi := ti.sfiSrc // always use sequence from file. 
decStruct expects same thing. - x.linef("var %sj%s int", tpfx, i) - x.linef("var %sb%s bool", tpfx, i) // break - x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length - var newbuf, nilbuf genBuf - for _, si := range tisfi { - x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", - tpfx, i, tpfx, i, tpfx, i, - tpfx, i, lenvarname, tpfx, i) - x.linef("if %sb%s { r.ReadArrayEnd(); %s }", tpfx, i, breakString) - x.line("r.ReadArrayElem()") - newbuf.reset() - nilbuf.reset() - t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf) - x.linef("if r.TryDecodeAsNil() { %s } else { %s", nilbuf.buf, newbuf.buf) - x.decVarMain(varname+"."+t2.Name, x.varsfx(), t2.Type, false) - x.line("}") - } - // read remaining values and throw away. - x.line("for {") - x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", - tpfx, i, tpfx, i, tpfx, i, - tpfx, i, lenvarname, tpfx, i) - x.linef("if %sb%s { break }", tpfx, i) - x.line("r.ReadArrayElem()") - x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i) - x.line("}") - x.line("r.ReadArrayEnd()") -} - -func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { - // varname MUST be a ptr, or a struct field or a slice element. - i := x.varsfx() - x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i) - x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs) - x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()") - x.linef("if %sl%s == 0 {", genTempVarPfx, i) - x.line("r.ReadMapEnd()") - if genUseOneFunctionForDecStructMap { - x.line("} else { ") - x.linef("%s.codecDecodeSelfFromMap(%sl%s, d)", varname, genTempVarPfx, i) - } else { - x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ") - x.line(varname + ".codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)") - x.line("} else {") - x.line(varname + ".codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)") - } - x.line("}") - - // else if container is array - x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs) - x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()") - x.linef("if %sl%s == 0 {", genTempVarPfx, i) - x.line("r.ReadArrayEnd()") - x.line("} else { ") - x.linef("%s.codecDecodeSelfFromArray(%sl%s, d)", varname, genTempVarPfx, i) - x.line("}") - // else panic - x.line("} else { ") - x.line("panic(errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + ")") - x.line("} ") -} - -// -------- - -type genV struct { - // genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice - MapKey string - Elem string - Primitive string - Size int -} - -func (x *genRunner) newGenV(t reflect.Type) (v genV) { - switch t.Kind() { - case reflect.Slice, reflect.Array: - te := t.Elem() - v.Elem = x.genTypeName(te) - v.Size = int(te.Size()) - case reflect.Map: - te, tk := t.Elem(), t.Key() - v.Elem = x.genTypeName(te) - v.MapKey = x.genTypeName(tk) - v.Size = int(te.Size() + tk.Size()) - default: - panic("unexpected type for newGenV. Requires map or slice type") - } - return -} - -func (x *genV) MethodNamePfx(prefix string, prim bool) string { - var name []byte - if prefix != "" { - name = append(name, prefix...) - } - if prim { - name = append(name, genTitleCaseName(x.Primitive)...) - } else { - if x.MapKey == "" { - name = append(name, "Slice"...) - } else { - name = append(name, "Map"...) - name = append(name, genTitleCaseName(x.MapKey)...) - } - name = append(name, genTitleCaseName(x.Elem)...) 
- } - return string(name) - -} - -// genImportPath returns import path of a non-predeclared named typed, or an empty string otherwise. -// -// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled, -// where PkgPath returns the full path, including the vendoring pre-fix that should have been stripped. -// We strip it here. -func genImportPath(t reflect.Type) (s string) { - s = t.PkgPath() - if genCheckVendor { - // HACK: always handle vendoring. It should be typically on in go 1.6, 1.7 - s = genStripVendor(s) - } - return -} - -// A go identifier is (letter|_)[letter|number|_]* -func genGoIdentifier(s string, checkFirstChar bool) string { - b := make([]byte, 0, len(s)) - t := make([]byte, 4) - var n int - for i, r := range s { - if checkFirstChar && i == 0 && !unicode.IsLetter(r) { - b = append(b, '_') - } - // r must be unicode_letter, unicode_digit or _ - if unicode.IsLetter(r) || unicode.IsDigit(r) { - n = utf8.EncodeRune(t, r) - b = append(b, t[:n]...) - } else { - b = append(b, '_') - } - } - return string(b) -} - -func genNonPtr(t reflect.Type) reflect.Type { - for t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t -} - -func genTitleCaseName(s string) string { - switch s { - case "interface{}", "interface {}": - return "Intf" - default: - return strings.ToUpper(s[0:1]) + s[1:] - } -} - -func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) { - var ptrPfx string - for t.Kind() == reflect.Ptr { - ptrPfx += "Ptrto" - t = t.Elem() - } - tstr := t.String() - if tn := t.Name(); tn != "" { - if tRef != nil && genImportPath(t) == genImportPath(tRef) { - return ptrPfx + tn - } else { - if genQNameRegex.MatchString(tstr) { - return ptrPfx + strings.Replace(tstr, ".", "_", 1000) - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } - } - switch t.Kind() { - case reflect.Map: - return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef) - case reflect.Slice: - return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef) - case reflect.Array: - return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef) - case reflect.Chan: - var cx string - switch t.ChanDir() { - case reflect.SendDir: - cx = "ChanSend" - case reflect.RecvDir: - cx = "ChanRecv" - default: - cx = "Chan" - } - return ptrPfx + cx + genMethodNameT(t.Elem(), tRef) - default: - if t == intfTyp { - return ptrPfx + "Interface" - } else { - if tRef != nil && genImportPath(t) == genImportPath(tRef) { - if t.Name() != "" { - return ptrPfx + t.Name() - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } else { - // best way to get the package name inclusive - // return ptrPfx + strings.Replace(tstr, ".", "_", 1000) - // return ptrPfx + genBase64enc.EncodeToString([]byte(tstr)) - if t.Name() != "" && genQNameRegex.MatchString(tstr) { - return ptrPfx + strings.Replace(tstr, ".", "_", 1000) - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } - } - } -} - -// genCustomNameForType base64encodes the t.String() value in such a way -// that it can be used within a function name. 
-func genCustomTypeName(tstr string) string { - len2 := genBase64enc.EncodedLen(len(tstr)) - bufx := make([]byte, len2) - genBase64enc.Encode(bufx, []byte(tstr)) - for i := len2 - 1; i >= 0; i-- { - if bufx[i] == '=' { - len2-- - } else { - break - } - } - return string(bufx[:len2]) -} - -func genIsImmutable(t reflect.Type) (v bool) { - return isImmutableKind(t.Kind()) -} - -type genInternal struct { - Version int - Values []genV -} - -func (x genInternal) FastpathLen() (l int) { - for _, v := range x.Values { - if v.Primitive == "" && !(v.MapKey == "" && v.Elem == "uint8") { - l++ - } - } - return -} - -func genInternalZeroValue(s string) string { - switch s { - case "interface{}", "interface {}": - return "nil" - case "bool": - return "false" - case "string": - return `""` - default: - return "0" - } -} - -var genInternalNonZeroValueIdx [5]uint64 -var genInternalNonZeroValueStrs = [2][5]string{ - {`"string-is-an-interface"`, "true", `"some-string"`, "11.1", "33"}, - {`"string-is-an-interface-2"`, "true", `"some-string-2"`, "22.2", "44"}, -} - -func genInternalNonZeroValue(s string) string { - switch s { - case "interface{}", "interface {}": - genInternalNonZeroValueIdx[0]++ - return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[0]%2][0] // return string, to remove ambiguity - case "bool": - genInternalNonZeroValueIdx[1]++ - return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[1]%2][1] - case "string": - genInternalNonZeroValueIdx[2]++ - return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[2]%2][2] - case "float32", "float64", "float", "double": - genInternalNonZeroValueIdx[3]++ - return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[3]%2][3] - default: - genInternalNonZeroValueIdx[4]++ - return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[4]%2][4] - } -} - -func genInternalEncCommandAsString(s string, vname string) string { - switch s { - case "uint", "uint8", "uint16", "uint32", "uint64": - return "ee.EncodeUint(uint64(" + vname + "))" - case "int", "int8", "int16", "int32", "int64": - return "ee.EncodeInt(int64(" + vname + "))" - case "string": - return "if e.h.StringToRaw { ee.EncodeStringBytesRaw(bytesView(" + vname + ")) " + - "} else { ee.EncodeStringEnc(cUTF8, " + vname + ") }" - case "float32": - return "ee.EncodeFloat32(" + vname + ")" - case "float64": - return "ee.EncodeFloat64(" + vname + ")" - case "bool": - return "ee.EncodeBool(" + vname + ")" - // case "symbol": - // return "ee.EncodeSymbol(" + vname + ")" - default: - return "e.encode(" + vname + ")" - } -} - -func genInternalDecCommandAsString(s string) string { - switch s { - case "uint": - return "uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))" - case "uint8": - return "uint8(chkOvf.UintV(dd.DecodeUint64(), 8))" - case "uint16": - return "uint16(chkOvf.UintV(dd.DecodeUint64(), 16))" - case "uint32": - return "uint32(chkOvf.UintV(dd.DecodeUint64(), 32))" - case "uint64": - return "dd.DecodeUint64()" - case "uintptr": - return "uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))" - case "int": - return "int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))" - case "int8": - return "int8(chkOvf.IntV(dd.DecodeInt64(), 8))" - case "int16": - return "int16(chkOvf.IntV(dd.DecodeInt64(), 16))" - case "int32": - return "int32(chkOvf.IntV(dd.DecodeInt64(), 32))" - case "int64": - return "dd.DecodeInt64()" - - case "string": - return "dd.DecodeString()" - case "float32": - return "float32(chkOvf.Float32V(dd.DecodeFloat64()))" - case "float64": - return "dd.DecodeFloat64()" - case 
"bool": - return "dd.DecodeBool()" - default: - panic(errors.New("gen internal: unknown type for decode: " + s)) - } -} - -func genInternalSortType(s string, elem bool) string { - for _, v := range [...]string{"int", "uint", "float", "bool", "string"} { - if strings.HasPrefix(s, v) { - if elem { - if v == "int" || v == "uint" || v == "float" { - return v + "64" - } else { - return v - } - } - return v + "Slice" - } - } - panic("sorttype: unexpected type: " + s) -} - -func genStripVendor(s string) string { - // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later. - // if s contains /vendor/ OR startsWith vendor/, then return everything after it. - const vendorStart = "vendor/" - const vendorInline = "/vendor/" - if i := strings.LastIndex(s, vendorInline); i >= 0 { - s = s[i+len(vendorInline):] - } else if strings.HasPrefix(s, vendorStart) { - s = s[len(vendorStart):] - } - return s -} - -// var genInternalMu sync.Mutex -var genInternalV = genInternal{Version: genVersion} -var genInternalTmplFuncs template.FuncMap -var genInternalOnce sync.Once - -func genInternalInit() { - types := [...]string{ - "interface{}", - "string", - "float32", - "float64", - "uint", - "uint8", - "uint16", - "uint32", - "uint64", - "uintptr", - "int", - "int8", - "int16", - "int32", - "int64", - "bool", - } - // keep as slice, so it is in specific iteration order. - // Initial order was uint64, string, interface{}, int, int64 - mapvaltypes := [...]string{ - "interface{}", - "string", - "uint", - "uint8", - "uint16", - "uint32", - "uint64", - "uintptr", - "int", - "int8", - "int16", - "int32", - "int64", - "float32", - "float64", - "bool", - } - wordSizeBytes := int(intBitsize) / 8 - - mapvaltypes2 := map[string]int{ - "interface{}": 2 * wordSizeBytes, - "string": 2 * wordSizeBytes, - "uint": 1 * wordSizeBytes, - "uint8": 1, - "uint16": 2, - "uint32": 4, - "uint64": 8, - "uintptr": 1 * wordSizeBytes, - "int": 1 * wordSizeBytes, - "int8": 1, - "int16": 2, - "int32": 4, - "int64": 8, - "float32": 4, - "float64": 8, - "bool": 1, - } - var gt = genInternal{Version: genVersion} - - // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function - for _, s := range types { - gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]}) - // if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already. - // gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) - // } - gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) - if _, ok := mapvaltypes2[s]; !ok { - gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]}) - } - for _, ms := range mapvaltypes { - gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]}) - } - } - - funcs := make(template.FuncMap) - // funcs["haspfx"] = strings.HasPrefix - funcs["encmd"] = genInternalEncCommandAsString - funcs["decmd"] = genInternalDecCommandAsString - funcs["zerocmd"] = genInternalZeroValue - funcs["nonzerocmd"] = genInternalNonZeroValue - funcs["hasprefix"] = strings.HasPrefix - funcs["sorttype"] = genInternalSortType - - genInternalV = gt - genInternalTmplFuncs = funcs -} - -// genInternalGoFile is used to generate source files from templates. -// It is run by the program author alone. -// Unfortunately, it has to be exported so that it can be called from a command line tool. 
-// *** DO NOT USE *** -func genInternalGoFile(r io.Reader, w io.Writer) (err error) { - genInternalOnce.Do(genInternalInit) - - gt := genInternalV - - t := template.New("").Funcs(genInternalTmplFuncs) - - tmplstr, err := ioutil.ReadAll(r) - if err != nil { - return - } - - if t, err = t.Parse(string(tmplstr)); err != nil { - return - } - - var out bytes.Buffer - err = t.Execute(&out, gt) - if err != nil { - return - } - - bout, err := format.Source(out.Bytes()) - if err != nil { - w.Write(out.Bytes()) // write out if error, so we can still see. - // w.Write(bout) // write out if error, as much as possible, so we can still see. - return - } - w.Write(bout) - return -} diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_gte_go15.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_gte_go15.go deleted file mode 100644 index 9ddbe205..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_gte_go15.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build go1.5 - -package codec - -import "reflect" - -const reflectArrayOfSupported = true - -func reflectArrayOf(count int, elem reflect.Type) reflect.Type { - return reflect.ArrayOf(count, elem) -} diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_lt_go15.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_lt_go15.go deleted file mode 100644 index c5fcd669..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_lt_go15.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build !go1.5 - -package codec - -import "reflect" - -const reflectArrayOfSupported = false - -func reflectArrayOf(count int, elem reflect.Type) reflect.Type { - panic("codec: reflect.ArrayOf unsupported in this go version") -} diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_gte_go19.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_gte_go19.go deleted file mode 100644 index bc39d6b7..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_gte_go19.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build go1.9 - -package codec - -import "reflect" - -func makeMapReflect(t reflect.Type, size int) reflect.Value { - if size < 0 { - return reflect.MakeMapWithSize(t, 4) - } - return reflect.MakeMapWithSize(t, size) -} diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_lt_go19.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_lt_go19.go deleted file mode 100644 index cde4cd37..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_lt_go19.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. 
- -// +build !go1.9 - -package codec - -import "reflect" - -func makeMapReflect(t reflect.Type, size int) reflect.Value { - return reflect.MakeMap(t) -} diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_gte_go110.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_gte_go110.go deleted file mode 100644 index 794133a3..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_gte_go110.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build go1.10 - -package codec - -const allowSetUnexportedEmbeddedPtr = false diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_lt_go110.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_lt_go110.go deleted file mode 100644 index fd92ede3..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_lt_go110.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build !go1.10 - -package codec - -const allowSetUnexportedEmbeddedPtr = true diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unsupported_lt_go14.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unsupported_lt_go14.go deleted file mode 100644 index 8debfa61..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unsupported_lt_go14.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build !go1.4 - -package codec - -// This codec package will only work for go1.4 and above. -// This is for the following reasons: -// - go 1.4 was released in 2014 -// - go runtime is written fully in go -// - interface only holds pointers -// - reflect.Value is stabilized as 3 words - -func init() { - panic("codec: go 1.3 and below are not supported") -} diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go15.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go15.go deleted file mode 100644 index 0f1bb01e..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go15.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build go1.5,!go1.6 - -package codec - -import "os" - -var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go16.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go16.go deleted file mode 100644 index 2fb4b057..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go16.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. 
- -// +build go1.6,!go1.7 - -package codec - -import "os" - -var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0" diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_gte_go17.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_gte_go17.go deleted file mode 100644 index c5b81550..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_gte_go17.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build go1.7 - -package codec - -const genCheckVendor = true diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_lt_go15.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_lt_go15.go deleted file mode 100644 index 837cf240..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_lt_go15.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build !go1.5 - -package codec - -var genCheckVendor = false diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/helper.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/helper.go index 228ad93d..7da3955e 100644 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/helper.go +++ b/src/vendor/github.com/hashicorp/go-msgpack/codec/helper.go @@ -1,200 +1,68 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. package codec // Contains code shared by both encode and decode. -// Some shared ideas around encoding/decoding -// ------------------------------------------ -// -// If an interface{} is passed, we first do a type assertion to see if it is -// a primitive type or a map/slice of primitive types, and use a fastpath to handle it. -// -// If we start with a reflect.Value, we are already in reflect.Value land and -// will try to grab the function for the underlying Type and directly call that function. -// This is more performant than calling reflect.Value.Interface(). -// -// This still helps us bypass many layers of reflection, and give best performance. -// -// Containers -// ------------ -// Containers in the stream are either associative arrays (key-value pairs) or -// regular arrays (indexed by incrementing integers). -// -// Some streams support indefinite-length containers, and use a breaking -// byte-sequence to denote that the container has come to an end. -// -// Some streams also are text-based, and use explicit separators to denote the -// end/beginning of different values. -// -// During encode, we use a high-level condition to determine how to iterate through -// the container. That decision is based on whether the container is text-based (with -// separators) or binary (without separators). If binary, we do not even call the -// encoding of separators. -// -// During decode, we use a different high-level condition to determine how to iterate -// through the containers. That decision is based on whether the stream contained -// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that -// it has to be binary, and we do not even try to read separators. 
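Aside (illustration, not part of the vendored file): the two decode-iteration strategies described above, length-prefixed versus break-terminated, are easiest to see in the struct-decoding loop that gen.go (also removed by this patch) emits. A condensed sketch of the emitted shape, using only reader methods that appear elsewhere in this patch (ReadMapStart, CheckBreak, ReadMapElemKey/ReadMapElemValue, ReadMapEnd); variable names are illustrative:

	yyl := r.ReadMapStart()   // negative when the stream uses explicit break markers
	yyhl := yyl >= 0          // "has length": the stream was length-prefixed
	for yyj := 0; ; yyj++ {
		if yyhl {
			if yyj >= yyl { break }
		} else if r.CheckBreak() {
			break
		}
		r.ReadMapElemKey()
		// ... decode the key, then r.ReadMapElemValue() and the matching field ...
	}
	r.ReadMapEnd()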
-// -// Philosophy -// ------------ -// On decode, this codec will update containers appropriately: -// - If struct, update fields from stream into fields of struct. -// If field in stream not found in struct, handle appropriately (based on option). -// If a struct field has no corresponding value in the stream, leave it AS IS. -// If nil in stream, set value to nil/zero value. -// - If map, update map from stream. -// If the stream value is NIL, set the map to nil. -// - if slice, try to update up to length of array in stream. -// if container len is less than stream array length, -// and container cannot be expanded, handled (based on option). -// This means you can decode 4-element stream array into 1-element array. -// -// ------------------------------------ -// On encode, user can specify omitEmpty. This means that the value will be omitted -// if the zero value. The problem may occur during decode, where omitted values do not affect -// the value being decoded into. This means that if decoding into a struct with an -// int field with current value=5, and the field is omitted in the stream, then after -// decoding, the value will still be 5 (not 0). -// omitEmpty only works if you guarantee that you always decode into zero-values. -// -// ------------------------------------ -// We could have truncated a map to remove keys not available in the stream, -// or set values in the struct which are not in the stream to their zero values. -// We decided against it because there is no efficient way to do it. -// We may introduce it as an option later. -// However, that will require enabling it for both runtime and code generation modes. -// -// To support truncate, we need to do 2 passes over the container: -// map -// - first collect all keys (e.g. in k1) -// - for each key in stream, mark k1 that the key should not be removed -// - after updating map, do second pass and call delete for all keys in k1 which are not marked -// struct: -// - for each field, track the *typeInfo s1 -// - iterate through all s1, and for each one not marked, set value to zero -// - this involves checking the possible anonymous fields which are nil ptrs. -// too much work. -// -// ------------------------------------------ -// Error Handling is done within the library using panic. -// -// This way, the code doesn't have to keep checking if an error has happened, -// and we don't have to keep sending the error value along with each call -// or storing it in the En|Decoder and checking it constantly along the way. -// -// The disadvantage is that small functions which use panics cannot be inlined. -// The code accounts for that by only using panics behind an interface; -// since interface calls cannot be inlined, this is irrelevant. -// -// We considered storing the error is En|Decoder. -// - once it has its err field set, it cannot be used again. -// - panicing will be optional, controlled by const flag. -// - code should always check error first and return early. -// We eventually decided against it as it makes the code clumsier to always -// check for these error conditions. - import ( - "bytes" - "encoding" "encoding/binary" - "errors" "fmt" - "io" "math" "reflect" "sort" - "strconv" "strings" "sync" - "sync/atomic" "time" + "unicode" + "unicode/utf8" ) const ( - scratchByteArrayLen = 32 - // initCollectionCap = 16 // 32 is defensive. 16 is preferred. + structTagName = "codec" - // Support encoding.(Binary|Text)(Unm|M)arshaler. 
+ // Support + // encoding.BinaryMarshaler: MarshalBinary() (data []byte, err error) + // encoding.BinaryUnmarshaler: UnmarshalBinary(data []byte) error // This constant flag will enable or disable it. - supportMarshalInterfaces = true + supportBinaryMarshal = true + + // Each Encoder or Decoder uses a cache of functions based on conditionals, + // so that the conditionals are not run every time. + // + // Either a map or a slice is used to keep track of the functions. + // The map is more natural, but has a higher cost than a slice/array. + // This flag (useMapForCodecCache) controls which is used. + useMapForCodecCache = false + + // For some common container types, we can short-circuit an elaborate + // reflection dance and call encode/decode directly. + // The currently supported types are: + // - slices of strings, or id's (int64,uint64) or interfaces. + // - maps of str->str, str->intf, id(int64,uint64)->intf, intf->intf + shortCircuitReflectToFastPath = true // for debugging, set this to false, to catch panic traces. // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. recoverPanicToErr = true - // arrayCacheLen is the length of the cache used in encoder or decoder for - // allowing zero-alloc initialization. - // arrayCacheLen = 8 - - // size of the cacheline: defaulting to value for archs: amd64, arm64, 386 - // should use "runtime/internal/sys".CacheLineSize, but that is not exposed. - cacheLineSize = 64 + // if checkStructForEmptyValue, check structs fields to see if an empty value. + // This could be an expensive call, so possibly disable it. + checkStructForEmptyValue = false - wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize - wordSize = wordSizeBits / 8 - - // so structFieldInfo fits into 8 bytes - maxLevelsEmbedding = 14 - - // useFinalizers=true configures finalizers to release pool'ed resources - // acquired by Encoder/Decoder during their GC. - // - // Note that calling SetFinalizer is always expensive, - // as code must be run on the systemstack even for SetFinalizer(t, nil). - // - // We document that folks SHOULD call Release() when done, or they can - // explicitly call SetFinalizer themselves e.g. - // runtime.SetFinalizer(e, (*Encoder).Release) - // runtime.SetFinalizer(d, (*Decoder).Release) - useFinalizers = false + // if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue + derefForIsEmptyValue = false ) -var oneByteArr [1]byte -var zeroByteSlice = oneByteArr[:0:0] - -var codecgen bool - -var refBitset bitset256 -var pool pooler -var panicv panicHdl - -func init() { - pool.init() - - refBitset.set(byte(reflect.Map)) - refBitset.set(byte(reflect.Ptr)) - refBitset.set(byte(reflect.Func)) - refBitset.set(byte(reflect.Chan)) -} - -type clsErr struct { - closed bool // is it closed? 
- errClosed error // error on closing -} - -// type entryType uint8 - -// const ( -// entryTypeBytes entryType = iota // make this 0, so a comparison is cheap -// entryTypeIo -// entryTypeBufio -// entryTypeUnset = 255 -// ) - type charEncoding uint8 const ( - _ charEncoding = iota // make 0 unset - cUTF8 - cUTF16LE - cUTF16BE - cUTF32LE - cUTF32BE - // Deprecated: not a true char encoding value - cRAW charEncoding = 255 + c_RAW charEncoding = iota + c_UTF8 + c_UTF16LE + c_UTF16BE + c_UTF32LE + c_UTF32BE ) // valueType is the stream type @@ -212,2508 +80,517 @@ const ( valueTypeBytes valueTypeMap valueTypeArray - valueTypeTime + valueTypeTimestamp valueTypeExt - // valueTypeInvalid = 0xff -) - -var valueTypeStrings = [...]string{ - "Unset", - "Nil", - "Int", - "Uint", - "Float", - "Bool", - "String", - "Symbol", - "Bytes", - "Map", - "Array", - "Timestamp", - "Ext", -} - -func (x valueType) String() string { - if int(x) < len(valueTypeStrings) { - return valueTypeStrings[x] - } - return strconv.FormatInt(int64(x), 10) -} - -type seqType uint8 - -const ( - _ seqType = iota - seqTypeArray - seqTypeSlice - seqTypeChan -) - -// note that containerMapStart and containerArraySend are not sent. -// This is because the ReadXXXStart and EncodeXXXStart already does these. -type containerState uint8 - -const ( - _ containerState = iota - - containerMapStart // slot left open, since Driver method already covers it - containerMapKey - containerMapValue - containerMapEnd - containerArrayStart // slot left open, since Driver methods already cover it - containerArrayElem - containerArrayEnd -) - -// // sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo -// type sfiIdx struct { -// name string -// index int -// } - -// do not recurse if a containing type refers to an embedded type -// which refers back to its containing type (via a pointer). -// The second time this back-reference happens, break out, -// so as not to cause an infinite loop. -const rgetMaxRecursion = 2 - -// Anecdotally, we believe most types have <= 12 fields. -// - even Java's PMD rules set TooManyFields threshold to 15. -// However, go has embedded fields, which should be regarded as -// top level, allowing structs to possibly double or triple. -// In addition, we don't want to keep creating transient arrays, -// especially for the sfi index tracking, and the evtypes tracking. 
-// -// So - try to keep typeInfoLoadArray within 2K bytes -const ( - typeInfoLoadArraySfisLen = 16 - typeInfoLoadArraySfiidxLen = 8 * 112 - typeInfoLoadArrayEtypesLen = 12 - typeInfoLoadArrayBLen = 8 * 4 + valueTypeInvalid = 0xff ) -type typeInfoLoad struct { - // fNames []string - // encNames []string - etypes []uintptr - sfis []structFieldInfo -} - -type typeInfoLoadArray struct { - // fNames [typeInfoLoadArrayLen]string - // encNames [typeInfoLoadArrayLen]string - sfis [typeInfoLoadArraySfisLen]structFieldInfo - sfiidx [typeInfoLoadArraySfiidxLen]byte - etypes [typeInfoLoadArrayEtypesLen]uintptr - b [typeInfoLoadArrayBLen]byte // scratch - used for struct field names -} - -// mirror json.Marshaler and json.Unmarshaler here, -// so we don't import the encoding/json package - -type jsonMarshaler interface { - MarshalJSON() ([]byte, error) -} -type jsonUnmarshaler interface { - UnmarshalJSON([]byte) error -} - -type isZeroer interface { - IsZero() bool -} - -type codecError struct { - name string - err interface{} -} - -func (e codecError) Cause() error { - switch xerr := e.err.(type) { - case nil: - return nil - case error: - return xerr - case string: - return errors.New(xerr) - case fmt.Stringer: - return errors.New(xerr.String()) - default: - return fmt.Errorf("%v", e.err) - } -} - -func (e codecError) Error() string { - return fmt.Sprintf("%s error: %v", e.name, e.err) -} - -// type byteAccepter func(byte) bool - var ( bigen = binary.BigEndian structInfoFieldName = "_struct" - mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) - mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) - intfSliceTyp = reflect.TypeOf([]interface{}(nil)) - intfTyp = intfSliceTyp.Elem() - - reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem() - - stringTyp = reflect.TypeOf("") - timeTyp = reflect.TypeOf(time.Time{}) - rawExtTyp = reflect.TypeOf(RawExt{}) - rawTyp = reflect.TypeOf(Raw{}) - uintptrTyp = reflect.TypeOf(uintptr(0)) - uint8Typ = reflect.TypeOf(uint8(0)) - uint8SliceTyp = reflect.TypeOf([]uint8(nil)) - uintTyp = reflect.TypeOf(uint(0)) - intTyp = reflect.TypeOf(int(0)) - - mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() - - binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() - binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() - - textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() - - jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem() - jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem() - - selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() - missingFielderTyp = reflect.TypeOf((*MissingFielder)(nil)).Elem() - iszeroTyp = reflect.TypeOf((*isZeroer)(nil)).Elem() - - uint8TypId = rt2id(uint8Typ) - uint8SliceTypId = rt2id(uint8SliceTyp) - rawExtTypId = rt2id(rawExtTyp) - rawTypId = rt2id(rawTyp) - intfTypId = rt2id(intfTyp) - timeTypId = rt2id(timeTyp) - stringTypId = rt2id(stringTyp) - - mapStrIntfTypId = rt2id(mapStrIntfTyp) - mapIntfIntfTypId = rt2id(mapIntfIntfTyp) - intfSliceTypId = rt2id(intfSliceTyp) - // mapBySliceTypId = rt2id(mapBySliceTyp) + cachedTypeInfo = make(map[uintptr]*typeInfo, 4) + cachedTypeInfoMutex sync.RWMutex + + intfSliceTyp = reflect.TypeOf([]interface{}(nil)) + intfTyp = intfSliceTyp.Elem() + + strSliceTyp = reflect.TypeOf([]string(nil)) + boolSliceTyp = reflect.TypeOf([]bool(nil)) + uintSliceTyp = reflect.TypeOf([]uint(nil)) + uint8SliceTyp = 
reflect.TypeOf([]uint8(nil)) + uint16SliceTyp = reflect.TypeOf([]uint16(nil)) + uint32SliceTyp = reflect.TypeOf([]uint32(nil)) + uint64SliceTyp = reflect.TypeOf([]uint64(nil)) + intSliceTyp = reflect.TypeOf([]int(nil)) + int8SliceTyp = reflect.TypeOf([]int8(nil)) + int16SliceTyp = reflect.TypeOf([]int16(nil)) + int32SliceTyp = reflect.TypeOf([]int32(nil)) + int64SliceTyp = reflect.TypeOf([]int64(nil)) + float32SliceTyp = reflect.TypeOf([]float32(nil)) + float64SliceTyp = reflect.TypeOf([]float64(nil)) - intBitsize = uint8(intTyp.Bits()) - uintBitsize = uint8(uintTyp.Bits()) - - // bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} + mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) + mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) + mapStrStrTyp = reflect.TypeOf(map[string]string(nil)) + + mapIntIntfTyp = reflect.TypeOf(map[int]interface{}(nil)) + mapInt64IntfTyp = reflect.TypeOf(map[int64]interface{}(nil)) + mapUintIntfTyp = reflect.TypeOf(map[uint]interface{}(nil)) + mapUint64IntfTyp = reflect.TypeOf(map[uint64]interface{}(nil)) + + stringTyp = reflect.TypeOf("") + timeTyp = reflect.TypeOf(time.Time{}) + rawExtTyp = reflect.TypeOf(RawExt{}) + + mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() + binaryMarshalerTyp = reflect.TypeOf((*binaryMarshaler)(nil)).Elem() + binaryUnmarshalerTyp = reflect.TypeOf((*binaryUnmarshaler)(nil)).Elem() + + rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() + intfTypId = reflect.ValueOf(intfTyp).Pointer() + timeTypId = reflect.ValueOf(timeTyp).Pointer() + + intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() + strSliceTypId = reflect.ValueOf(strSliceTyp).Pointer() + + boolSliceTypId = reflect.ValueOf(boolSliceTyp).Pointer() + uintSliceTypId = reflect.ValueOf(uintSliceTyp).Pointer() + uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() + uint16SliceTypId = reflect.ValueOf(uint16SliceTyp).Pointer() + uint32SliceTypId = reflect.ValueOf(uint32SliceTyp).Pointer() + uint64SliceTypId = reflect.ValueOf(uint64SliceTyp).Pointer() + intSliceTypId = reflect.ValueOf(intSliceTyp).Pointer() + int8SliceTypId = reflect.ValueOf(int8SliceTyp).Pointer() + int16SliceTypId = reflect.ValueOf(int16SliceTyp).Pointer() + int32SliceTypId = reflect.ValueOf(int32SliceTyp).Pointer() + int64SliceTypId = reflect.ValueOf(int64SliceTyp).Pointer() + float32SliceTypId = reflect.ValueOf(float32SliceTyp).Pointer() + float64SliceTypId = reflect.ValueOf(float64SliceTyp).Pointer() + + mapStrStrTypId = reflect.ValueOf(mapStrStrTyp).Pointer() + mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() + mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() + mapIntIntfTypId = reflect.ValueOf(mapIntIntfTyp).Pointer() + mapInt64IntfTypId = reflect.ValueOf(mapInt64IntfTyp).Pointer() + mapUintIntfTypId = reflect.ValueOf(mapUintIntfTyp).Pointer() + mapUint64IntfTypId = reflect.ValueOf(mapUint64IntfTyp).Pointer() + // Id = reflect.ValueOf().Pointer() + // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() + + binaryMarshalerTypId = reflect.ValueOf(binaryMarshalerTyp).Pointer() + binaryUnmarshalerTypId = reflect.ValueOf(binaryUnmarshalerTyp).Pointer() + + intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) + uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) + + bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} - - chkOvf checkOverflow - - errNoFieldNameToStructFieldInfo = errors.New("no field name passed to parseStructFieldInfo") ) -var defTypeInfos = 
NewTypeInfos([]string{"codec", "json"}) - -var immutableKindsSet = [32]bool{ - // reflect.Invalid: , - reflect.Bool: true, - reflect.Int: true, - reflect.Int8: true, - reflect.Int16: true, - reflect.Int32: true, - reflect.Int64: true, - reflect.Uint: true, - reflect.Uint8: true, - reflect.Uint16: true, - reflect.Uint32: true, - reflect.Uint64: true, - reflect.Uintptr: true, - reflect.Float32: true, - reflect.Float64: true, - reflect.Complex64: true, - reflect.Complex128: true, - // reflect.Array - // reflect.Chan - // reflect.Func: true, - // reflect.Interface - // reflect.Map - // reflect.Ptr - // reflect.Slice - reflect.String: true, - // reflect.Struct - // reflect.UnsafePointer -} - -// Selfer defines methods by which a value can encode or decode itself. -// -// Any type which implements Selfer will be able to encode or decode itself. -// Consequently, during (en|de)code, this takes precedence over -// (text|binary)(M|Unm)arshal or extension support. -// -// By definition, it is not allowed for a Selfer to directly call Encode or Decode on itself. -// If that is done, Encode/Decode will rightfully fail with a Stack Overflow style error. -// For example, the snippet below will cause such an error. -// type testSelferRecur struct{} -// func (s *testSelferRecur) CodecEncodeSelf(e *Encoder) { e.MustEncode(s) } -// func (s *testSelferRecur) CodecDecodeSelf(d *Decoder) { d.MustDecode(s) } -// -// Note: *the first set of bytes of any value MUST NOT represent nil in the format*. -// This is because, during each decode, we first check the the next set of bytes -// represent nil, and if so, we just set the value to nil. -type Selfer interface { - CodecEncodeSelf(*Encoder) - CodecDecodeSelf(*Decoder) +type binaryUnmarshaler interface { + UnmarshalBinary(data []byte) error } -// MissingFielder defines the interface allowing structs to internally decode or encode -// values which do not map to struct fields. -// -// We expect that this interface is bound to a pointer type (so the mutation function works). -// -// A use-case is if a version of a type unexports a field, but you want compatibility between -// both versions during encoding and decoding. -// -// Note that the interface is completely ignored during codecgen. -type MissingFielder interface { - // CodecMissingField is called to set a missing field and value pair. - // - // It returns true if the missing field was set on the struct. - CodecMissingField(field []byte, value interface{}) bool - - // CodecMissingFields returns the set of fields which are not struct fields - CodecMissingFields() map[string]interface{} +type binaryMarshaler interface { + MarshalBinary() (data []byte, err error) } -// MapBySlice is a tag interface that denotes wrapped slice should encode as a map in the stream. +// MapBySlice represents a slice which should be encoded as a map in the stream. // The slice contains a sequence of key-value pairs. -// This affords storing a map in a specific sequence in the stream. 
-// -// Example usage: -// type T1 []string // or []int or []Point or any other "slice" type -// func (_ T1) MapBySlice{} // T1 now implements MapBySlice, and will be encoded as a map -// type T2 struct { KeyValues T1 } -// -// var kvs = []string{"one", "1", "two", "2", "three", "3"} -// var v2 = T2{ KeyValues: T1(kvs) } -// // v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"} } -// -// The support of MapBySlice affords the following: -// - A slice type which implements MapBySlice will be encoded as a map -// - A slice can be decoded from a map in the stream -// - It MUST be a slice type (not a pointer receiver) that implements MapBySlice type MapBySlice interface { MapBySlice() } -// BasicHandle encapsulates the common options and extension functions. +// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. // -// Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. +// BasicHandle encapsulates the common options and extension functions. type BasicHandle struct { - // BasicHandle is always a part of a different type. - // It doesn't have to fit into it own cache lines. - - // TypeInfos is used to get the type info for any type. - // - // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json - TypeInfos *TypeInfos - - // Note: BasicHandle is not comparable, due to these slices here (extHandle, intf2impls). - // If *[]T is used instead, this becomes comparable, at the cost of extra indirection. - // Thses slices are used all the time, so keep as slices (not pointers). - extHandle - - intf2impls - - inited uint32 - _ uint32 // padding - - // ---- cache line - - RPCOptions - - // TimeNotBuiltin configures whether time.Time should be treated as a builtin type. - // - // All Handlers should know how to encode/decode time.Time as part of the core - // format specification, or as a standard extension defined by the format. - // - // However, users can elect to handle time.Time as a custom extension, or via the - // standard library's encoding.Binary(M|Unm)arshaler or Text(M|Unm)arshaler interface. - // To elect this behavior, users can set TimeNotBuiltin=true. - // Note: Setting TimeNotBuiltin=true can be used to enable the legacy behavior - // (for Cbor and Msgpack), where time.Time was not a builtin supported type. - TimeNotBuiltin bool - - // ExplicitRelease configures whether Release() is implicitly called after an encode or - // decode call. - // - // If you will hold onto an Encoder or Decoder for re-use, by calling Reset(...) - // on it or calling (Must)Encode repeatedly into a given []byte or io.Writer, - // then you do not want it to be implicitly closed after each Encode/Decode call. - // Doing so will unnecessarily return resources to the shared pool, only for you to - // grab them right after again to do another Encode/Decode call. - // - // Instead, you configure ExplicitRelease=true, and you explicitly call Release() when - // you are truly done. - // - // As an alternative, you can explicitly set a finalizer - so its resources - // are returned to the shared pool before it is garbage-collected. Do it as below: - // runtime.SetFinalizer(e, (*Encoder).Release) - // runtime.SetFinalizer(d, (*Decoder).Release) - ExplicitRelease bool - - be bool // is handle a binary encoding? - js bool // is handle javascript handler? 
- n byte // first letter of handle name - _ uint16 // padding - - // ---- cache line - + EncodeOptions DecodeOptions +} - // ---- cache line - - EncodeOptions +// Handle is the interface for a specific encoding format. +// +// Typically, a Handle is pre-configured before first time use, +// and not modified while in use. Such a pre-configured Handle +// is safe for concurrent access. +type Handle interface { + writeExt() bool + getBasicHandle() *BasicHandle + newEncDriver(w encWriter) encDriver + newDecDriver(r decReader) decDriver +} - // noBuiltInTypeChecker +// RawExt represents raw unprocessed extension data. +type RawExt struct { + Tag byte + Data []byte +} - rtidFns atomicRtidFnSlice - mu sync.Mutex - // r []uintptr // rtids mapped to s above +type extTypeTagFn struct { + rtid uintptr + rt reflect.Type + tag byte + encFn func(reflect.Value) ([]byte, error) + decFn func(reflect.Value, []byte) error } -// basicHandle returns an initialized BasicHandle from the Handle. -func basicHandle(hh Handle) (x *BasicHandle) { - x = hh.getBasicHandle() - // ** We need to simulate once.Do, to ensure no data race within the block. - // ** Consequently, below would not work. - // if atomic.CompareAndSwapUint32(&x.inited, 0, 1) { - // x.be = hh.isBinary() - // _, x.js = hh.(*JsonHandle) - // x.n = hh.Name()[0] +type extHandle []*extTypeTagFn + +// AddExt registers an encode and decode function for a reflect.Type. +// Note that the type must be a named type, and specifically not +// a pointer or Interface. An error is returned if that is not honored. +// +// To Deregister an ext, call AddExt with 0 tag, nil encfn and nil decfn. +func (o *extHandle) AddExt( + rt reflect.Type, + tag byte, + encfn func(reflect.Value) ([]byte, error), + decfn func(reflect.Value, []byte) error, +) (err error) { + // o is a pointer, because we may need to initialize it + if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { + err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T", + reflect.Zero(rt).Interface()) + return + } + + // o cannot be nil, since it is always embedded in a Handle. + // if nil, let it panic. + // if o == nil { + // err = errors.New("codec.Handle.AddExt: extHandle cannot be a nil pointer.") + // return // } - // simulate once.Do using our own stored flag and mutex as a CompareAndSwap - // is not sufficient, since a race condition can occur within init(Handle) function. - // init is made noinline, so that this function can be inlined by its caller. 
- if atomic.LoadUint32(&x.inited) == 0 { - x.init(hh) + rtid := reflect.ValueOf(rt).Pointer() + for _, v := range *o { + if v.rtid == rtid { + v.tag, v.encFn, v.decFn = tag, encfn, decfn + return + } } + + *o = append(*o, &extTypeTagFn{rtid, rt, tag, encfn, decfn}) return } -//go:noinline -func (x *BasicHandle) init(hh Handle) { - // make it uninlineable, as it is called at most once - x.mu.Lock() - if x.inited == 0 { - x.be = hh.isBinary() - _, x.js = hh.(*JsonHandle) - x.n = hh.Name()[0] - atomic.StoreUint32(&x.inited, 1) +func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { + for _, v := range o { + if v.rtid == rtid { + return v + } } - x.mu.Unlock() + return nil } -func (x *BasicHandle) getBasicHandle() *BasicHandle { - return x +func (o extHandle) getExtForTag(tag byte) *extTypeTagFn { + for _, v := range o { + if v.tag == tag { + return v + } + } + return nil } -func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { - if x.TypeInfos == nil { - return defTypeInfos.get(rtid, rt) +func (o extHandle) getDecodeExtForTag(tag byte) ( + rv reflect.Value, fn func(reflect.Value, []byte) error) { + if x := o.getExtForTag(tag); x != nil { + // ext is only registered for base + rv = reflect.New(x.rt).Elem() + fn = x.decFn } - return x.TypeInfos.get(rtid, rt) + return } -func findFn(s []codecRtidFn, rtid uintptr) (i uint, fn *codecFn) { - // binary search. adapted from sort/search.go. - // Note: we use goto (instead of for loop) so this can be inlined. - - // h, i, j := 0, 0, len(s) - var h uint // var h, i uint - var j = uint(len(s)) -LOOP: - if i < j { - h = i + (j-i)/2 - if s[h].rtid < rtid { - i = h + 1 - } else { - j = h - } - goto LOOP +func (o extHandle) getDecodeExt(rtid uintptr) (tag byte, fn func(reflect.Value, []byte) error) { + if x := o.getExt(rtid); x != nil { + tag = x.tag + fn = x.decFn } - if i < uint(len(s)) && s[i].rtid == rtid { - fn = s[i].fn + return +} + +func (o extHandle) getEncodeExt(rtid uintptr) (tag byte, fn func(reflect.Value) ([]byte, error)) { + if x := o.getExt(rtid); x != nil { + tag = x.tag + fn = x.encFn } return } -func (x *BasicHandle) fn(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *codecFn) { - rtid := rt2id(rt) - sp := x.rtidFns.load() - if sp != nil { - if _, fn = findFn(sp, rtid); fn != nil { - // xdebugf("<<<< %c: found fn for %v in rtidfns of size: %v", c.n, rt, len(sp)) - return - } +type structFieldInfo struct { + encName string // encode name + + // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. + + is []int // (recursive/embedded) field index in struct + i int16 // field index in struct + omitEmpty bool + toArray bool // if field is _struct, is the toArray set? + + // tag string // tag + // name string // field name + // encNameBs []byte // encoded name as byte stream + // ikind int // kind of the field as an int i.e. 
int(reflect.Kind) +} + +func parseStructFieldInfo(fname string, stag string) *structFieldInfo { + if fname == "" { + panic("parseStructFieldInfo: No Field Name") } - c := x - // xdebugf("#### for %c: load fn for %v in rtidfns of size: %v", c.n, rt, len(sp)) - fn = new(codecFn) - fi := &(fn.i) - ti := c.getTypeInfo(rtid, rt) - fi.ti = ti - - rk := reflect.Kind(ti.kind) - - if checkCodecSelfer && (ti.cs || ti.csp) { - fn.fe = (*Encoder).selferMarshal - fn.fd = (*Decoder).selferUnmarshal - fi.addrF = true - fi.addrD = ti.csp - fi.addrE = ti.csp - } else if rtid == timeTypId && !c.TimeNotBuiltin { - fn.fe = (*Encoder).kTime - fn.fd = (*Decoder).kTime - } else if rtid == rawTypId { - fn.fe = (*Encoder).raw - fn.fd = (*Decoder).raw - } else if rtid == rawExtTypId { - fn.fe = (*Encoder).rawExt - fn.fd = (*Decoder).rawExt - fi.addrF = true - fi.addrD = true - fi.addrE = true - } else if xfFn := c.getExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext - fn.fe = (*Encoder).ext - fn.fd = (*Decoder).ext - fi.addrF = true - fi.addrD = true - if rk == reflect.Struct || rk == reflect.Array { - fi.addrE = true - } - } else if supportMarshalInterfaces && c.be && (ti.bm || ti.bmp) && (ti.bu || ti.bup) { - fn.fe = (*Encoder).binaryMarshal - fn.fd = (*Decoder).binaryUnmarshal - fi.addrF = true - fi.addrD = ti.bup - fi.addrE = ti.bmp - } else if supportMarshalInterfaces && !c.be && c.js && (ti.jm || ti.jmp) && (ti.ju || ti.jup) { - //If JSON, we should check JSONMarshal before textMarshal - fn.fe = (*Encoder).jsonMarshal - fn.fd = (*Decoder).jsonUnmarshal - fi.addrF = true - fi.addrD = ti.jup - fi.addrE = ti.jmp - } else if supportMarshalInterfaces && !c.be && (ti.tm || ti.tmp) && (ti.tu || ti.tup) { - fn.fe = (*Encoder).textMarshal - fn.fd = (*Decoder).textUnmarshal - fi.addrF = true - fi.addrD = ti.tup - fi.addrE = ti.tmp - } else { - if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { - if ti.pkgpath == "" { // un-named slice or map - if idx := fastpathAV.index(rtid); idx != -1 { - fn.fe = fastpathAV[idx].encfn - fn.fd = fastpathAV[idx].decfn - fi.addrD = true - fi.addrF = false + si := structFieldInfo{ + // name: fname, + encName: fname, + // tag: stag, + } + + if stag != "" { + for i, s := range strings.Split(stag, ",") { + if i == 0 { + if s != "" { + si.encName = s } } else { - // use mapping for underlying type if there - var rtu reflect.Type - if rk == reflect.Map { - rtu = reflect.MapOf(ti.key, ti.elem) - } else { - rtu = reflect.SliceOf(ti.elem) - } - rtuid := rt2id(rtu) - if idx := fastpathAV.index(rtuid); idx != -1 { - xfnf := fastpathAV[idx].encfn - xrt := fastpathAV[idx].rt - fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) { - xfnf(e, xf, xrv.Convert(xrt)) - } - fi.addrD = true - fi.addrF = false // meaning it can be an address(ptr) or a value - xfnf2 := fastpathAV[idx].decfn - fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { - if xrv.Kind() == reflect.Ptr { - xfnf2(d, xf, xrv.Convert(reflect.PtrTo(xrt))) - } else { - xfnf2(d, xf, xrv.Convert(xrt)) - } - } - } - } - } - if fn.fe == nil && fn.fd == nil { - switch rk { - case reflect.Bool: - fn.fe = (*Encoder).kBool - fn.fd = (*Decoder).kBool - case reflect.String: - fn.fe = (*Encoder).kString - fn.fd = (*Decoder).kString - case reflect.Int: - fn.fd = (*Decoder).kInt - fn.fe = (*Encoder).kInt - case reflect.Int8: - fn.fe = (*Encoder).kInt8 - fn.fd = (*Decoder).kInt8 - case reflect.Int16: - fn.fe = (*Encoder).kInt16 - fn.fd = (*Decoder).kInt16 - case reflect.Int32: - fn.fe = 
(*Encoder).kInt32 - fn.fd = (*Decoder).kInt32 - case reflect.Int64: - fn.fe = (*Encoder).kInt64 - fn.fd = (*Decoder).kInt64 - case reflect.Uint: - fn.fd = (*Decoder).kUint - fn.fe = (*Encoder).kUint - case reflect.Uint8: - fn.fe = (*Encoder).kUint8 - fn.fd = (*Decoder).kUint8 - case reflect.Uint16: - fn.fe = (*Encoder).kUint16 - fn.fd = (*Decoder).kUint16 - case reflect.Uint32: - fn.fe = (*Encoder).kUint32 - fn.fd = (*Decoder).kUint32 - case reflect.Uint64: - fn.fe = (*Encoder).kUint64 - fn.fd = (*Decoder).kUint64 - case reflect.Uintptr: - fn.fe = (*Encoder).kUintptr - fn.fd = (*Decoder).kUintptr - case reflect.Float32: - fn.fe = (*Encoder).kFloat32 - fn.fd = (*Decoder).kFloat32 - case reflect.Float64: - fn.fe = (*Encoder).kFloat64 - fn.fd = (*Decoder).kFloat64 - case reflect.Invalid: - fn.fe = (*Encoder).kInvalid - fn.fd = (*Decoder).kErr - case reflect.Chan: - fi.seq = seqTypeChan - fn.fe = (*Encoder).kSlice - fn.fd = (*Decoder).kSlice - case reflect.Slice: - fi.seq = seqTypeSlice - fn.fe = (*Encoder).kSlice - fn.fd = (*Decoder).kSlice - case reflect.Array: - fi.seq = seqTypeArray - fn.fe = (*Encoder).kSlice - fi.addrF = false - fi.addrD = false - rt2 := reflect.SliceOf(ti.elem) - fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { - d.h.fn(rt2, true, false).fd(d, xf, xrv.Slice(0, xrv.Len())) - } - // fn.fd = (*Decoder).kArray - case reflect.Struct: - if ti.anyOmitEmpty || ti.mf || ti.mfp { - fn.fe = (*Encoder).kStruct - } else { - fn.fe = (*Encoder).kStructNoOmitempty + switch s { + case "omitempty": + si.omitEmpty = true + case "toarray": + si.toArray = true } - fn.fd = (*Decoder).kStruct - case reflect.Map: - fn.fe = (*Encoder).kMap - fn.fd = (*Decoder).kMap - case reflect.Interface: - // encode: reflect.Interface are handled already by preEncodeValue - fn.fd = (*Decoder).kInterface - fn.fe = (*Encoder).kErr - default: - // reflect.Ptr and reflect.Interface are handled already by preEncodeValue - fn.fe = (*Encoder).kErr - fn.fd = (*Decoder).kErr } } } + // si.encNameBs = []byte(si.encName) + return &si +} - c.mu.Lock() - var sp2 []codecRtidFn - sp = c.rtidFns.load() - if sp == nil { - sp2 = []codecRtidFn{{rtid, fn}} - c.rtidFns.store(sp2) - // xdebugf(">>>> adding rt: %v to rtidfns of size: %v", rt, len(sp2)) - // xdebugf(">>>> loading stored rtidfns of size: %v", len(c.rtidFns.load())) - } else { - idx, fn2 := findFn(sp, rtid) - if fn2 == nil { - sp2 = make([]codecRtidFn, len(sp)+1) - copy(sp2, sp[:idx]) - copy(sp2[idx+1:], sp[idx:]) - sp2[idx] = codecRtidFn{rtid, fn} - c.rtidFns.store(sp2) - // xdebugf(">>>> adding rt: %v to rtidfns of size: %v", rt, len(sp2)) +type sfiSortedByEncName []*structFieldInfo - } - } - c.mu.Unlock() - return +func (p sfiSortedByEncName) Len() int { + return len(p) } -// Handle defines a specific encoding format. It also stores any runtime state -// used during an Encoding or Decoding session e.g. stored state about Types, etc. -// -// Once a handle is configured, it can be shared across multiple Encoders and Decoders. -// -// Note that a Handle is NOT safe for concurrent modification. -// Consequently, do not modify it after it is configured if shared among -// multiple Encoders and Decoders in different goroutines. -// -// Consequently, the typical usage model is that a Handle is pre-configured -// before first time use, and not modified while in use. -// Such a pre-configured Handle is safe for concurrent access. -type Handle interface { - Name() string - // return the basic handle. It may not have been inited. 
- // Prefer to use basicHandle() helper function that ensures it has been inited. - getBasicHandle() *BasicHandle - recreateEncDriver(encDriver) bool - newEncDriver(w *Encoder) encDriver - newDecDriver(r *Decoder) decDriver - isBinary() bool - hasElemSeparators() bool - // IsBuiltinType(rtid uintptr) bool +func (p sfiSortedByEncName) Less(i, j int) bool { + return p[i].encName < p[j].encName } -// Raw represents raw formatted bytes. -// We "blindly" store it during encode and retrieve the raw bytes during decode. -// Note: it is dangerous during encode, so we may gate the behaviour -// behind an Encode flag which must be explicitly set. -type Raw []byte +func (p sfiSortedByEncName) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} -// RawExt represents raw unprocessed extension data. -// Some codecs will decode extension data as a *RawExt -// if there is no registered extension for the tag. +// typeInfo keeps information about each type referenced in the encode/decode sequence. // -// Only one of Data or Value is nil. -// If Data is nil, then the content of the RawExt is in the Value. -type RawExt struct { - Tag uint64 - // Data is the []byte which represents the raw ext. If nil, ext is exposed in Value. - // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types - Data []byte - // Value represents the extension, if Data is nil. - // Value is used by codecs (e.g. cbor, json) which leverage the format to do - // custom serialization of the types. - Value interface{} -} +// During an encode/decode sequence, we work as below: +// - If base is a built in type, en/decode base value +// - If base is registered as an extension, en/decode base value +// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method +// - Else decode appropriately based on the reflect.Kind +type typeInfo struct { + sfi []*structFieldInfo // sorted. Used when enc/dec struct to map. + sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. -// BytesExt handles custom (de)serialization of types to/from []byte. -// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. -type BytesExt interface { - // WriteExt converts a value to a []byte. - // - // Note: v is a pointer iff the registered extension type is a struct or array kind. - WriteExt(v interface{}) []byte + rt reflect.Type + rtid uintptr - // ReadExt updates a value from a []byte. - // - // Note: dst is always a pointer kind to the registered extension type. - ReadExt(dst interface{}, src []byte) + // baseId gives pointer to the base reflect.Type, after deferencing + // the pointers. E.g. base type of ***time.Time is time.Time. + base reflect.Type + baseId uintptr + baseIndir int8 // number of indirections to get to base + + mbs bool // base type (T or *T) is a MapBySlice + + m bool // base type (T or *T) is a binaryMarshaler + unm bool // base type (T or *T) is a binaryUnmarshaler + mIndir int8 // number of indirections to get to binaryMarshaler type + unmIndir int8 // number of indirections to get to binaryUnmarshaler type + toArray bool // whether this (struct) type should be encoded as an array +} + +func (ti *typeInfo) indexForEncName(name string) int { + //tisfi := ti.sfi + const binarySearchThreshold = 16 + if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { + // linear search. faster than binary search in my testing up to 16-field structs. + for i, si := range ti.sfi { + if si.encName == name { + return i + } + } + } else { + // binary search. 
adapted from sort/search.go. + h, i, j := 0, 0, sfilen + for i < j { + h = i + (j-i)/2 + if ti.sfi[h].encName < name { + i = h + 1 + } else { + j = h + } + } + if i < sfilen && ti.sfi[i].encName == name { + return i + } + } + return -1 } -// InterfaceExt handles custom (de)serialization of types to/from another interface{} value. -// The Encoder or Decoder will then handle the further (de)serialization of that known type. -// -// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types. -type InterfaceExt interface { - // ConvertExt converts a value into a simpler interface for easy encoding - // e.g. convert time.Time to int64. - // - // Note: v is a pointer iff the registered extension type is a struct or array kind. - ConvertExt(v interface{}) interface{} +func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + var ok bool + cachedTypeInfoMutex.RLock() + pti, ok = cachedTypeInfo[rtid] + cachedTypeInfoMutex.RUnlock() + if ok { + return + } - // UpdateExt updates a value from a simpler interface for easy decoding - // e.g. convert int64 to time.Time. - // - // Note: dst is always a pointer kind to the registered extension type. - UpdateExt(dst interface{}, src interface{}) -} + cachedTypeInfoMutex.Lock() + defer cachedTypeInfoMutex.Unlock() + if pti, ok = cachedTypeInfo[rtid]; ok { + return + } -// Ext handles custom (de)serialization of custom types / extensions. -type Ext interface { - BytesExt - InterfaceExt -} + ti := typeInfo{rt: rt, rtid: rtid} + pti = &ti -// addExtWrapper is a wrapper implementation to support former AddExt exported method. -type addExtWrapper struct { - encFn func(reflect.Value) ([]byte, error) - decFn func(reflect.Value, []byte) error -} + var indir int8 + if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { + ti.m, ti.mIndir = true, indir + } + if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { + ti.unm, ti.unmIndir = true, indir + } + if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { + ti.mbs = true + } -func (x addExtWrapper) WriteExt(v interface{}) []byte { - bs, err := x.encFn(reflect.ValueOf(v)) - if err != nil { - panic(err) + pt := rt + var ptIndir int8 + // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } + for pt.Kind() == reflect.Ptr { + pt = pt.Elem() + ptIndir++ + } + if ptIndir == 0 { + ti.base = rt + ti.baseId = rtid + } else { + ti.base = pt + ti.baseId = reflect.ValueOf(pt).Pointer() + ti.baseIndir = ptIndir } - return bs + + if rt.Kind() == reflect.Struct { + var siInfo *structFieldInfo + if f, ok := rt.FieldByName(structInfoFieldName); ok { + siInfo = parseStructFieldInfo(structInfoFieldName, f.Tag.Get(structTagName)) + ti.toArray = siInfo.toArray + } + sfip := make([]*structFieldInfo, 0, rt.NumField()) + rgetTypeInfo(rt, nil, make(map[string]bool), &sfip, siInfo) + + // // try to put all si close together + // const tryToPutAllStructFieldInfoTogether = true + // if tryToPutAllStructFieldInfoTogether { + // sfip2 := make([]structFieldInfo, len(sfip)) + // for i, si := range sfip { + // sfip2[i] = *si + // } + // for i := range sfip { + // sfip[i] = &sfip2[i] + // } + // } + + ti.sfip = make([]*structFieldInfo, len(sfip)) + ti.sfi = make([]*structFieldInfo, len(sfip)) + copy(ti.sfip, sfip) + sort.Sort(sfiSortedByEncName(sfip)) + copy(ti.sfi, sfip) + } + // sfi = sfip + cachedTypeInfo[rtid] = pti + return } -func (x addExtWrapper) ReadExt(v interface{}, bs []byte) { - if err := x.decFn(reflect.ValueOf(v), bs); err != nil { - panic(err) +func 
rgetTypeInfo(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool, + sfi *[]*structFieldInfo, siInfo *structFieldInfo, +) { + // for rt.Kind() == reflect.Ptr { + // // indexstack = append(indexstack, 0) + // rt = rt.Elem() + // } + for j := 0; j < rt.NumField(); j++ { + f := rt.Field(j) + stag := f.Tag.Get(structTagName) + if stag == "-" { + continue + } + if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) { + continue + } + // if anonymous and there is no struct tag and its a struct (or pointer to struct), inline it. + if f.Anonymous && stag == "" { + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if ft.Kind() == reflect.Struct { + indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + rgetTypeInfo(ft, indexstack2, fnameToHastag, sfi, siInfo) + continue + } + } + // do not let fields with same name in embedded structs override field at higher level. + // this must be done after anonymous check, to allow anonymous field + // still include their child fields + if _, ok := fnameToHastag[f.Name]; ok { + continue + } + si := parseStructFieldInfo(f.Name, stag) + // si.ikind = int(f.Type.Kind()) + if len(indexstack) == 0 { + si.i = int16(j) + } else { + si.i = -1 + si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + } + + if siInfo != nil { + if siInfo.omitEmpty { + si.omitEmpty = true + } + } + *sfi = append(*sfi, si) + fnameToHastag[f.Name] = stag != "" } } -func (x addExtWrapper) ConvertExt(v interface{}) interface{} { - return x.WriteExt(v) +func panicToErr(err *error) { + if recoverPanicToErr { + if x := recover(); x != nil { + //debug.PrintStack() + panicValToErr(x, err) + } + } } -func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) { - x.ReadExt(dest, v.([]byte)) +func doPanic(tag string, format string, params ...interface{}) { + params2 := make([]interface{}, len(params)+1) + params2[0] = tag + copy(params2[1:], params) + panic(fmt.Errorf("%s: "+format, params2...)) } -type extWrapper struct { - BytesExt - InterfaceExt +func checkOverflowFloat32(f float64, doCheck bool) { + if !doCheck { + return + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowFloat() + f2 := f + if f2 < 0 { + f2 = -f + } + if math.MaxFloat32 < f2 && f2 <= math.MaxFloat64 { + decErr("Overflow float32 value: %v", f2) + } } -type bytesExtFailer struct{} - -func (bytesExtFailer) WriteExt(v interface{}) []byte { - panicv.errorstr("BytesExt.WriteExt is not supported") - return nil -} -func (bytesExtFailer) ReadExt(v interface{}, bs []byte) { - panicv.errorstr("BytesExt.ReadExt is not supported") -} - -type interfaceExtFailer struct{} - -func (interfaceExtFailer) ConvertExt(v interface{}) interface{} { - panicv.errorstr("InterfaceExt.ConvertExt is not supported") - return nil -} -func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) { - panicv.errorstr("InterfaceExt.UpdateExt is not supported") -} - -type binaryEncodingType struct{} - -func (binaryEncodingType) isBinary() bool { return true } - -type textEncodingType struct{} - -func (textEncodingType) isBinary() bool { return false } - -// noBuiltInTypes is embedded into many types which do not support builtins -// e.g. msgpack, simple, cbor. 
- -// type noBuiltInTypeChecker struct{} -// func (noBuiltInTypeChecker) IsBuiltinType(rt uintptr) bool { return false } -// type noBuiltInTypes struct{ noBuiltInTypeChecker } - -type noBuiltInTypes struct{} - -func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {} -func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {} - -// type noStreamingCodec struct{} -// func (noStreamingCodec) CheckBreak() bool { return false } -// func (noStreamingCodec) hasElemSeparators() bool { return false } - -type noElemSeparators struct{} - -func (noElemSeparators) hasElemSeparators() (v bool) { return } -func (noElemSeparators) recreateEncDriver(e encDriver) (v bool) { return } - -// bigenHelper. -// Users must already slice the x completely, because we will not reslice. -type bigenHelper struct { - x []byte // must be correctly sliced to appropriate len. slicing is a cost. - w *encWriterSwitch -} - -func (z bigenHelper) writeUint16(v uint16) { - bigen.PutUint16(z.x, v) - z.w.writeb(z.x) -} - -func (z bigenHelper) writeUint32(v uint32) { - bigen.PutUint32(z.x, v) - z.w.writeb(z.x) -} - -func (z bigenHelper) writeUint64(v uint64) { - bigen.PutUint64(z.x, v) - z.w.writeb(z.x) -} - -type extTypeTagFn struct { - rtid uintptr - rtidptr uintptr - rt reflect.Type - tag uint64 - ext Ext - _ [1]uint64 // padding -} - -type extHandle []extTypeTagFn - -// AddExt registes an encode and decode function for a reflect.Type. -// To deregister an Ext, call AddExt with nil encfn and/or nil decfn. -// -// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead. -func (o *extHandle) AddExt(rt reflect.Type, tag byte, - encfn func(reflect.Value) ([]byte, error), - decfn func(reflect.Value, []byte) error) (err error) { - if encfn == nil || decfn == nil { - return o.SetExt(rt, uint64(tag), nil) - } - return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn}) -} - -// SetExt will set the extension for a tag and reflect.Type. -// Note that the type must be a named type, and specifically not a pointer or Interface. -// An error is returned if that is not honored. -// To Deregister an ext, call SetExt with nil Ext. -// -// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead. -func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { - // o is a pointer, because we may need to initialize it - rk := rt.Kind() - for rk == reflect.Ptr { - rt = rt.Elem() - rk = rt.Kind() - } - - if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr { - return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt) - } - - rtid := rt2id(rt) - switch rtid { - case timeTypId, rawTypId, rawExtTypId: - // all natively supported type, so cannot have an extension - return // TODO: should we silently ignore, or return an error??? 
- } - // if o == nil { - // return errors.New("codec.Handle.SetExt: extHandle not initialized") - // } - o2 := *o - // if o2 == nil { - // return errors.New("codec.Handle.SetExt: extHandle not initialized") - // } - for i := range o2 { - v := &o2[i] - if v.rtid == rtid { - v.tag, v.ext = tag, ext - return - } - } - rtidptr := rt2id(reflect.PtrTo(rt)) - *o = append(o2, extTypeTagFn{rtid, rtidptr, rt, tag, ext, [1]uint64{}}) - return -} - -func (o extHandle) getExt(rtid uintptr) (v *extTypeTagFn) { - for i := range o { - v = &o[i] - if v.rtid == rtid || v.rtidptr == rtid { - return - } - } - return nil -} - -func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) { - for i := range o { - v = &o[i] - if v.tag == tag { - return - } - } - return nil -} - -type intf2impl struct { - rtid uintptr // for intf - impl reflect.Type - // _ [1]uint64 // padding // not-needed, as *intf2impl is never returned. -} - -type intf2impls []intf2impl - -// Intf2Impl maps an interface to an implementing type. -// This allows us support infering the concrete type -// and populating it when passed an interface. -// e.g. var v io.Reader can be decoded as a bytes.Buffer, etc. -// -// Passing a nil impl will clear the mapping. -func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) { - if impl != nil && !impl.Implements(intf) { - return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf) - } - rtid := rt2id(intf) - o2 := *o - for i := range o2 { - v := &o2[i] - if v.rtid == rtid { - v.impl = impl - return - } - } - *o = append(o2, intf2impl{rtid, impl}) - return -} - -func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) { - for i := range o { - v := &o[i] - if v.rtid == rtid { - if v.impl == nil { - return - } - if v.impl.Kind() == reflect.Ptr { - return reflect.New(v.impl.Elem()) - } - return reflect.New(v.impl).Elem() - } - } - return -} - -type structFieldInfoFlag uint8 - -const ( - _ structFieldInfoFlag = 1 << iota - structFieldInfoFlagReady - structFieldInfoFlagOmitEmpty -) - -func (x *structFieldInfoFlag) flagSet(f structFieldInfoFlag) { - *x = *x | f -} - -func (x *structFieldInfoFlag) flagClr(f structFieldInfoFlag) { - *x = *x &^ f -} - -func (x structFieldInfoFlag) flagGet(f structFieldInfoFlag) bool { - return x&f != 0 -} - -func (x structFieldInfoFlag) omitEmpty() bool { - return x.flagGet(structFieldInfoFlagOmitEmpty) -} - -func (x structFieldInfoFlag) ready() bool { - return x.flagGet(structFieldInfoFlagReady) -} - -type structFieldInfo struct { - encName string // encode name - fieldName string // field name - - is [maxLevelsEmbedding]uint16 // (recursive/embedded) field index in struct - nis uint8 // num levels of embedding. if 1, then it's not embedded. - - encNameAsciiAlphaNum bool // the encName only contains ascii alphabet and numbers - structFieldInfoFlag - _ [1]byte // padding -} - -func (si *structFieldInfo) setToZeroValue(v reflect.Value) { - if v, valid := si.field(v, false); valid { - v.Set(reflect.Zero(v.Type())) - } -} - -// rv returns the field of the struct. 
-// If anonymous, it returns an Invalid -func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value, valid bool) { - // replicate FieldByIndex - for i, x := range si.is { - if uint8(i) == si.nis { - break - } - if v, valid = baseStructRv(v, update); !valid { - return - } - v = v.Field(int(x)) - } - - return v, true -} - -// func (si *structFieldInfo) fieldval(v reflect.Value, update bool) reflect.Value { -// v, _ = si.field(v, update) -// return v -// } - -func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) { - keytype = valueTypeString // default - if stag == "" { - return - } - for i, s := range strings.Split(stag, ",") { - if i == 0 { - } else { - switch s { - case "omitempty": - omitEmpty = true - case "toarray": - toArray = true - case "int": - keytype = valueTypeInt - case "uint": - keytype = valueTypeUint - case "float": - keytype = valueTypeFloat - // case "bool": - // keytype = valueTypeBool - case "string": - keytype = valueTypeString - } - } - } - return -} - -func (si *structFieldInfo) parseTag(stag string) { - // if fname == "" { - // panic(errNoFieldNameToStructFieldInfo) - // } - - if stag == "" { - return - } - for i, s := range strings.Split(stag, ",") { - if i == 0 { - if s != "" { - si.encName = s - } - } else { - switch s { - case "omitempty": - si.flagSet(structFieldInfoFlagOmitEmpty) - // si.omitEmpty = true - // case "toarray": - // si.toArray = true - } - } - } -} - -type sfiSortedByEncName []*structFieldInfo - -func (p sfiSortedByEncName) Len() int { return len(p) } -func (p sfiSortedByEncName) Less(i, j int) bool { return p[uint(i)].encName < p[uint(j)].encName } -func (p sfiSortedByEncName) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } - -const structFieldNodeNumToCache = 4 - -type structFieldNodeCache struct { - rv [structFieldNodeNumToCache]reflect.Value - idx [structFieldNodeNumToCache]uint32 - num uint8 -} - -func (x *structFieldNodeCache) get(key uint32) (fv reflect.Value, valid bool) { - for i, k := range &x.idx { - if uint8(i) == x.num { - return // break - } - if key == k { - return x.rv[i], true - } - } - return -} - -func (x *structFieldNodeCache) tryAdd(fv reflect.Value, key uint32) { - if x.num < structFieldNodeNumToCache { - x.rv[x.num] = fv - x.idx[x.num] = key - x.num++ - return - } -} - -type structFieldNode struct { - v reflect.Value - cache2 structFieldNodeCache - cache3 structFieldNodeCache - update bool -} - -func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) { - // return si.fieldval(x.v, x.update) - // Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding - // This mostly saves us time on the repeated calls to v.Elem, v.Field, etc. 
- var valid bool - switch si.nis { - case 1: - fv = x.v.Field(int(si.is[0])) - case 2: - if fv, valid = x.cache2.get(uint32(si.is[0])); valid { - fv = fv.Field(int(si.is[1])) - return - } - fv = x.v.Field(int(si.is[0])) - if fv, valid = baseStructRv(fv, x.update); !valid { - return - } - x.cache2.tryAdd(fv, uint32(si.is[0])) - fv = fv.Field(int(si.is[1])) - case 3: - var key uint32 = uint32(si.is[0])<<16 | uint32(si.is[1]) - if fv, valid = x.cache3.get(key); valid { - fv = fv.Field(int(si.is[2])) - return - } - fv = x.v.Field(int(si.is[0])) - if fv, valid = baseStructRv(fv, x.update); !valid { - return - } - fv = fv.Field(int(si.is[1])) - if fv, valid = baseStructRv(fv, x.update); !valid { - return - } - x.cache3.tryAdd(fv, key) - fv = fv.Field(int(si.is[2])) - default: - fv, _ = si.field(x.v, x.update) - } - return -} - -func baseStructRv(v reflect.Value, update bool) (v2 reflect.Value, valid bool) { - for v.Kind() == reflect.Ptr { - if v.IsNil() { - if !update { - return - } - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - return v, true -} - -type typeInfoFlag uint8 - -const ( - typeInfoFlagComparable = 1 << iota - typeInfoFlagIsZeroer - typeInfoFlagIsZeroerPtr -) - -// typeInfo keeps information about each (non-ptr) type referenced in the encode/decode sequence. -// -// During an encode/decode sequence, we work as below: -// - If base is a built in type, en/decode base value -// - If base is registered as an extension, en/decode base value -// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method -// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method -// - Else decode appropriately based on the reflect.Kind -type typeInfo struct { - rt reflect.Type - elem reflect.Type - pkgpath string - - rtid uintptr - // rv0 reflect.Value // saved zero value, used if immutableKind - - numMeth uint16 // number of methods - kind uint8 - chandir uint8 - - anyOmitEmpty bool // true if a struct, and any of the fields are tagged "omitempty" - toArray bool // whether this (struct) type should be encoded as an array - keyType valueType // if struct, how is the field name stored in a stream? default is string - mbs bool // base type (T or *T) is a MapBySlice - - // ---- cpu cache line boundary? - sfiSort []*structFieldInfo // sorted. Used when enc/dec struct to map. - sfiSrc []*structFieldInfo // unsorted. Used when enc/dec struct to array. - - key reflect.Type - - // ---- cpu cache line boundary? - // sfis []structFieldInfo // all sfi, in src order, as created. - sfiNamesSort []byte // all names, with indexes into the sfiSort - - // format of marshal type fields below: [btj][mu]p? OR csp? - - bm bool // T is a binaryMarshaler - bmp bool // *T is a binaryMarshaler - bu bool // T is a binaryUnmarshaler - bup bool // *T is a binaryUnmarshaler - tm bool // T is a textMarshaler - tmp bool // *T is a textMarshaler - tu bool // T is a textUnmarshaler - tup bool // *T is a textUnmarshaler - - jm bool // T is a jsonMarshaler - jmp bool // *T is a jsonMarshaler - ju bool // T is a jsonUnmarshaler - jup bool // *T is a jsonUnmarshaler - cs bool // T is a Selfer - csp bool // *T is a Selfer - mf bool // T is a MissingFielder - mfp bool // *T is a MissingFielder - - // other flags, with individual bits representing if set. 
- flags typeInfoFlag - infoFieldOmitempty bool - - _ [6]byte // padding - _ [2]uint64 // padding -} - -func (ti *typeInfo) isFlag(f typeInfoFlag) bool { - return ti.flags&f != 0 -} - -func (ti *typeInfo) indexForEncName(name []byte) (index int16) { - var sn []byte - if len(name)+2 <= 32 { - var buf [32]byte // should not escape to heap - sn = buf[:len(name)+2] - } else { - sn = make([]byte, len(name)+2) - } - copy(sn[1:], name) - sn[0], sn[len(sn)-1] = tiSep2(name), 0xff - j := bytes.Index(ti.sfiNamesSort, sn) - if j < 0 { - return -1 - } - index = int16(uint16(ti.sfiNamesSort[j+len(sn)+1]) | uint16(ti.sfiNamesSort[j+len(sn)])<<8) - return -} - -type rtid2ti struct { - rtid uintptr - ti *typeInfo -} - -// TypeInfos caches typeInfo for each type on first inspection. -// -// It is configured with a set of tag keys, which are used to get -// configuration for the type. -type TypeInfos struct { - // infos: formerly map[uintptr]*typeInfo, now *[]rtid2ti, 2 words expected - infos atomicTypeInfoSlice - mu sync.Mutex - tags []string - _ [2]uint64 // padding -} - -// NewTypeInfos creates a TypeInfos given a set of struct tags keys. -// -// This allows users customize the struct tag keys which contain configuration -// of their types. -func NewTypeInfos(tags []string) *TypeInfos { - return &TypeInfos{tags: tags} -} - -func (x *TypeInfos) structTag(t reflect.StructTag) (s string) { - // check for tags: codec, json, in that order. - // this allows seamless support for many configured structs. - for _, x := range x.tags { - s = t.Get(x) - if s != "" { - return s - } - } - return -} - -func findTypeInfo(s []rtid2ti, rtid uintptr) (i uint, ti *typeInfo) { - // binary search. adapted from sort/search.go. - // Note: we use goto (instead of for loop) so this can be inlined. - - // if sp == nil { - // return -1, nil - // } - // s := *sp - - // h, i, j := 0, 0, len(s) - var h uint // var h, i uint - var j = uint(len(s)) -LOOP: - if i < j { - h = i + (j-i)/2 - if s[h].rtid < rtid { - i = h + 1 - } else { - j = h - } - goto LOOP - } - if i < uint(len(s)) && s[i].rtid == rtid { - ti = s[i].ti - } - return -} - -func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { - sp := x.infos.load() - if sp != nil { - _, pti = findTypeInfo(sp, rtid) - if pti != nil { - return - } - } - - rk := rt.Kind() - - if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) { - panicv.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt) - } - - // do not hold lock while computing this. - // it may lead to duplication, but that's ok. 
- ti := typeInfo{ - rt: rt, - rtid: rtid, - kind: uint8(rk), - pkgpath: rt.PkgPath(), - keyType: valueTypeString, // default it - so it's never 0 - } - // ti.rv0 = reflect.Zero(rt) - - // ti.comparable = rt.Comparable() - ti.numMeth = uint16(rt.NumMethod()) - - ti.bm, ti.bmp = implIntf(rt, binaryMarshalerTyp) - ti.bu, ti.bup = implIntf(rt, binaryUnmarshalerTyp) - ti.tm, ti.tmp = implIntf(rt, textMarshalerTyp) - ti.tu, ti.tup = implIntf(rt, textUnmarshalerTyp) - ti.jm, ti.jmp = implIntf(rt, jsonMarshalerTyp) - ti.ju, ti.jup = implIntf(rt, jsonUnmarshalerTyp) - ti.cs, ti.csp = implIntf(rt, selferTyp) - ti.mf, ti.mfp = implIntf(rt, missingFielderTyp) - - b1, b2 := implIntf(rt, iszeroTyp) - if b1 { - ti.flags |= typeInfoFlagIsZeroer - } - if b2 { - ti.flags |= typeInfoFlagIsZeroerPtr - } - if rt.Comparable() { - ti.flags |= typeInfoFlagComparable - } - - switch rk { - case reflect.Struct: - var omitEmpty bool - if f, ok := rt.FieldByName(structInfoFieldName); ok { - ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag)) - ti.infoFieldOmitempty = omitEmpty - } else { - ti.keyType = valueTypeString - } - pp, pi := &pool.tiload, pool.tiload.Get() // pool.tiLoad() - pv := pi.(*typeInfoLoadArray) - pv.etypes[0] = ti.rtid - // vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]} - vv := typeInfoLoad{pv.etypes[:1], pv.sfis[:0]} - x.rget(rt, rtid, omitEmpty, nil, &vv) - // ti.sfis = vv.sfis - ti.sfiSrc, ti.sfiSort, ti.sfiNamesSort, ti.anyOmitEmpty = rgetResolveSFI(rt, vv.sfis, pv) - pp.Put(pi) - case reflect.Map: - ti.elem = rt.Elem() - ti.key = rt.Key() - case reflect.Slice: - ti.mbs, _ = implIntf(rt, mapBySliceTyp) - ti.elem = rt.Elem() - case reflect.Chan: - ti.elem = rt.Elem() - ti.chandir = uint8(rt.ChanDir()) - case reflect.Array, reflect.Ptr: - ti.elem = rt.Elem() - } - // sfi = sfiSrc - - x.mu.Lock() - sp = x.infos.load() - var sp2 []rtid2ti - if sp == nil { - pti = &ti - sp2 = []rtid2ti{{rtid, pti}} - x.infos.store(sp2) - } else { - var idx uint - idx, pti = findTypeInfo(sp, rtid) - if pti == nil { - pti = &ti - sp2 = make([]rtid2ti, len(sp)+1) - copy(sp2, sp[:idx]) - copy(sp2[idx+1:], sp[idx:]) - sp2[idx] = rtid2ti{rtid, pti} - x.infos.store(sp2) - } - } - x.mu.Unlock() - return -} - -func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool, - indexstack []uint16, pv *typeInfoLoad) { - // Read up fields and store how to access the value. - // - // It uses go's rules for message selectors, - // which say that the field with the shallowest depth is selected. - // - // Note: we consciously use slices, not a map, to simulate a set. 
- // Typically, types have < 16 fields, - // and iteration using equals is faster than maps there - flen := rt.NumField() - if flen > (1< %v fields are not supported - has %v fields", - (1<= 0; i-- { // bounds-check elimination - b := si.encName[i] - if (b >= '0' && b <= '9') || (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') { - continue - } - si.encNameAsciiAlphaNum = false - break - } - si.fieldName = f.Name - si.flagSet(structFieldInfoFlagReady) - - // pv.encNames = append(pv.encNames, si.encName) - - // si.ikind = int(f.Type.Kind()) - if len(indexstack) > maxLevelsEmbedding-1 { - panicv.errorf("codec: only supports up to %v depth of embedding - type has %v depth", - maxLevelsEmbedding-1, len(indexstack)) - } - si.nis = uint8(len(indexstack)) + 1 - copy(si.is[:], indexstack) - si.is[len(indexstack)] = j - - if omitEmpty { - si.flagSet(structFieldInfoFlagOmitEmpty) - } - pv.sfis = append(pv.sfis, si) - } -} - -func tiSep(name string) uint8 { - // (xn[0]%64) // (between 192-255 - outside ascii BMP) - // return 0xfe - (name[0] & 63) - // return 0xfe - (name[0] & 63) - uint8(len(name)) - // return 0xfe - (name[0] & 63) - uint8(len(name)&63) - // return ((0xfe - (name[0] & 63)) & 0xf8) | (uint8(len(name) & 0x07)) - return 0xfe - (name[0] & 63) - uint8(len(name)&63) -} - -func tiSep2(name []byte) uint8 { - return 0xfe - (name[0] & 63) - uint8(len(name)&63) -} - -// resolves the struct field info got from a call to rget. -// Returns a trimmed, unsorted and sorted []*structFieldInfo. -func rgetResolveSFI(rt reflect.Type, x []structFieldInfo, pv *typeInfoLoadArray) ( - y, z []*structFieldInfo, ss []byte, anyOmitEmpty bool) { - sa := pv.sfiidx[:0] - sn := pv.b[:] - n := len(x) - - var xn string - var ui uint16 - var sep byte - - for i := range x { - ui = uint16(i) - xn = x[i].encName // fieldName or encName? use encName for now. - if len(xn)+2 > cap(pv.b) { - sn = make([]byte, len(xn)+2) - } else { - sn = sn[:len(xn)+2] - } - // use a custom sep, so that misses are less frequent, - // since the sep (first char in search) is as unique as first char in field name. - sep = tiSep(xn) - sn[0], sn[len(sn)-1] = sep, 0xff - copy(sn[1:], xn) - j := bytes.Index(sa, sn) - if j == -1 { - sa = append(sa, sep) - sa = append(sa, xn...) 
- sa = append(sa, 0xff, byte(ui>>8), byte(ui)) - } else { - index := uint16(sa[j+len(sn)+1]) | uint16(sa[j+len(sn)])<<8 - // one of them must be reset to nil, - // and the index updated appropriately to the other one - if x[i].nis == x[index].nis { - } else if x[i].nis < x[index].nis { - sa[j+len(sn)], sa[j+len(sn)+1] = byte(ui>>8), byte(ui) - if x[index].ready() { - x[index].flagClr(structFieldInfoFlagReady) - n-- - } - } else { - if x[i].ready() { - x[i].flagClr(structFieldInfoFlagReady) - n-- - } - } - } - - } - var w []structFieldInfo - sharingArray := len(x) <= typeInfoLoadArraySfisLen // sharing array with typeInfoLoadArray - if sharingArray { - w = make([]structFieldInfo, n) - } - - // remove all the nils (non-ready) - y = make([]*structFieldInfo, n) - n = 0 - var sslen int - for i := range x { - if !x[i].ready() { - continue - } - if !anyOmitEmpty && x[i].omitEmpty() { - anyOmitEmpty = true - } - if sharingArray { - w[n] = x[i] - y[n] = &w[n] - } else { - y[n] = &x[i] - } - sslen = sslen + len(x[i].encName) + 4 - n++ - } - if n != len(y) { - panicv.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d", - rt, len(y), len(x), n) - } - - z = make([]*structFieldInfo, len(y)) - copy(z, y) - sort.Sort(sfiSortedByEncName(z)) - - sharingArray = len(sa) <= typeInfoLoadArraySfiidxLen - if sharingArray { - ss = make([]byte, 0, sslen) - } else { - ss = sa[:0] // reuse the newly made sa array if necessary - } - for i := range z { - xn = z[i].encName - sep = tiSep(xn) - ui = uint16(i) - ss = append(ss, sep) - ss = append(ss, xn...) - ss = append(ss, 0xff, byte(ui>>8), byte(ui)) - } - return -} - -func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) { - return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp) -} - -// isEmptyStruct is only called from isEmptyValue, and checks if a struct is empty: -// - does it implement IsZero() bool -// - is it comparable, and can i compare directly using == -// - if checkStruct, then walk through the encodable fields -// and check if they are empty or not. -func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool { - // v is a struct kind - no need to check again. - // We only check isZero on a struct kind, to reduce the amount of times - // that we lookup the rtid and typeInfo for each type as we walk the tree. - - vt := v.Type() - rtid := rt2id(vt) - if tinfos == nil { - tinfos = defTypeInfos - } - ti := tinfos.get(rtid, vt) - if ti.rtid == timeTypId { - return rv2i(v).(time.Time).IsZero() - } - if ti.isFlag(typeInfoFlagIsZeroerPtr) && v.CanAddr() { - return rv2i(v.Addr()).(isZeroer).IsZero() - } - if ti.isFlag(typeInfoFlagIsZeroer) { - return rv2i(v).(isZeroer).IsZero() - } - if ti.isFlag(typeInfoFlagComparable) { - return rv2i(v) == rv2i(reflect.Zero(vt)) - } - if !checkStruct { - return false - } - // We only care about what we can encode/decode, - // so that is what we use to check omitEmpty. - for _, si := range ti.sfiSrc { - sfv, valid := si.field(v, false) - if valid && !isEmptyValue(sfv, tinfos, deref, checkStruct) { - return false - } - } - return true -} - -// func roundFloat(x float64) float64 { -// t := math.Trunc(x) -// if math.Abs(x-t) >= 0.5 { -// return t + math.Copysign(1, x) -// } -// return t -// } - -func panicToErr(h errDecorator, err *error) { - // Note: This method MUST be called directly from defer i.e. defer panicToErr ... 
- // else it seems the recover is not fully handled - if recoverPanicToErr { - if x := recover(); x != nil { - // fmt.Printf("panic'ing with: %v\n", x) - // debug.PrintStack() - panicValToErr(h, x, err) - } - } -} - -func panicValToErr(h errDecorator, v interface{}, err *error) { - switch xerr := v.(type) { - case nil: - case error: - switch xerr { - case nil: - case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized: - // treat as special (bubble up) - *err = xerr - default: - h.wrapErr(xerr, err) - } - case string: - if xerr != "" { - h.wrapErr(xerr, err) - } - case fmt.Stringer: - if xerr != nil { - h.wrapErr(xerr, err) - } - default: - h.wrapErr(v, err) - } -} - -func isImmutableKind(k reflect.Kind) (v bool) { - // return immutableKindsSet[k] - // since we know reflect.Kind is in range 0..31, then use the k%32 == k constraint - return immutableKindsSet[k%reflect.Kind(len(immutableKindsSet))] // bounds-check-elimination -} - -// ---- - -type codecFnInfo struct { - ti *typeInfo - xfFn Ext - xfTag uint64 - seq seqType - addrD bool - addrF bool // if addrD, this says whether decode function can take a value or a ptr - addrE bool -} - -// codecFn encapsulates the captured variables and the encode function. -// This way, we only do some calculations one times, and pass to the -// code block that should be called (encapsulated in a function) -// instead of executing the checks every time. -type codecFn struct { - i codecFnInfo - fe func(*Encoder, *codecFnInfo, reflect.Value) - fd func(*Decoder, *codecFnInfo, reflect.Value) - _ [1]uint64 // padding -} - -type codecRtidFn struct { - rtid uintptr - fn *codecFn -} - -// ---- - -// these "checkOverflow" functions must be inlinable, and not call anybody. -// Overflow means that the value cannot be represented without wrapping/overflow. -// Overflow=false does not mean that the value can be represented without losing precision -// (especially for floating point). - -type checkOverflow struct{} - -// func (checkOverflow) Float16(f float64) (overflow bool) { -// panicv.errorf("unimplemented") -// if f < 0 { -// f = -f -// } -// return math.MaxFloat32 < f && f <= math.MaxFloat64 -// } - -func (checkOverflow) Float32(v float64) (overflow bool) { - if v < 0 { - v = -v - } - return math.MaxFloat32 < v && v <= math.MaxFloat64 -} -func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) { - if bitsize == 0 || bitsize >= 64 || v == 0 { - return - } - if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { - overflow = true - } - return -} -func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) { - if bitsize == 0 || bitsize >= 64 || v == 0 { - return - } - if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { - overflow = true - } - return -} -func (checkOverflow) SignedInt(v uint64) (overflow bool) { - //e.g. 
-127 to 128 for int8 - pos := (v >> 63) == 0 - ui2 := v & 0x7fffffffffffffff - if pos { - if ui2 > math.MaxInt64 { - overflow = true - } - } else { - if ui2 > math.MaxInt64-1 { - overflow = true - } - } - return -} - -func (x checkOverflow) Float32V(v float64) float64 { - if x.Float32(v) { - panicv.errorf("float32 overflow: %v", v) - } - return v -} -func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 { - if x.Uint(v, bitsize) { - panicv.errorf("uint64 overflow: %v", v) - } - return v -} -func (x checkOverflow) IntV(v int64, bitsize uint8) int64 { - if x.Int(v, bitsize) { - panicv.errorf("int64 overflow: %v", v) - } - return v -} -func (x checkOverflow) SignedIntV(v uint64) int64 { - if x.SignedInt(v) { - panicv.errorf("uint64 to int64 overflow: %v", v) - } - return int64(v) -} - -// ------------------ SORT ----------------- - -func isNaN(f float64) bool { return f != f } - -// ----------------------- - -type ioFlusher interface { - Flush() error -} - -type ioPeeker interface { - Peek(int) ([]byte, error) -} - -type ioBuffered interface { - Buffered() int -} - -// ----------------------- - -type intSlice []int64 -type uintSlice []uint64 - -// type uintptrSlice []uintptr -type floatSlice []float64 -type boolSlice []bool -type stringSlice []string - -// type bytesSlice [][]byte - -func (p intSlice) Len() int { return len(p) } -func (p intSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } -func (p intSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } - -func (p uintSlice) Len() int { return len(p) } -func (p uintSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } -func (p uintSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } - -// func (p uintptrSlice) Len() int { return len(p) } -// func (p uintptrSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } -// func (p uintptrSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } - -func (p floatSlice) Len() int { return len(p) } -func (p floatSlice) Less(i, j int) bool { - return p[uint(i)] < p[uint(j)] || isNaN(p[uint(i)]) && !isNaN(p[uint(j)]) -} -func (p floatSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } - -func (p stringSlice) Len() int { return len(p) } -func (p stringSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } -func (p stringSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } - -// func (p bytesSlice) Len() int { return len(p) } -// func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)], p[uint(j)]) == -1 } -// func (p bytesSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } - -func (p boolSlice) Len() int { return len(p) } -func (p boolSlice) Less(i, j int) bool { return !p[uint(i)] && p[uint(j)] } -func (p boolSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } - -// --------------------- - -type sfiRv struct { - v *structFieldInfo - r reflect.Value -} - -type intRv struct { - v int64 - r reflect.Value -} -type intRvSlice []intRv -type uintRv struct { - v uint64 - r reflect.Value -} -type uintRvSlice []uintRv -type floatRv struct { - v float64 - r reflect.Value -} -type floatRvSlice []floatRv -type boolRv struct { - v bool - r reflect.Value -} -type boolRvSlice []boolRv -type stringRv struct { - v string - r reflect.Value -} -type stringRvSlice []stringRv -type bytesRv struct { - v []byte - r reflect.Value -} -type bytesRvSlice []bytesRv -type timeRv struct { - v time.Time - r reflect.Value -} -type 
timeRvSlice []timeRv - -func (p intRvSlice) Len() int { return len(p) } -func (p intRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v } -func (p intRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } - -func (p uintRvSlice) Len() int { return len(p) } -func (p uintRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v } -func (p uintRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } - -func (p floatRvSlice) Len() int { return len(p) } -func (p floatRvSlice) Less(i, j int) bool { - return p[uint(i)].v < p[uint(j)].v || isNaN(p[uint(i)].v) && !isNaN(p[uint(j)].v) -} -func (p floatRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } - -func (p stringRvSlice) Len() int { return len(p) } -func (p stringRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v } -func (p stringRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } - -func (p bytesRvSlice) Len() int { return len(p) } -func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1 } -func (p bytesRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } - -func (p boolRvSlice) Len() int { return len(p) } -func (p boolRvSlice) Less(i, j int) bool { return !p[uint(i)].v && p[uint(j)].v } -func (p boolRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } - -func (p timeRvSlice) Len() int { return len(p) } -func (p timeRvSlice) Less(i, j int) bool { return p[uint(i)].v.Before(p[uint(j)].v) } -func (p timeRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } - -// ----------------- - -type bytesI struct { - v []byte - i interface{} -} - -type bytesISlice []bytesI - -func (p bytesISlice) Len() int { return len(p) } -func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1 } -func (p bytesISlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } - -// ----------------- - -type set []uintptr - -func (s *set) add(v uintptr) (exists bool) { - // e.ci is always nil, or len >= 1 - x := *s - if x == nil { - x = make([]uintptr, 1, 8) - x[0] = v - *s = x - return - } - // typically, length will be 1. make this perform. - if len(x) == 1 { - if j := x[0]; j == 0 { - x[0] = v - } else if j == v { - exists = true - } else { - x = append(x, v) - *s = x - } - return - } - // check if it exists - for _, j := range x { - if j == v { - exists = true - return - } - } - // try to replace a "deleted" slot - for i, j := range x { - if j == 0 { - x[i] = v - return - } - } - // if unable to replace deleted slot, just append it. - x = append(x, v) - *s = x - return -} - -func (s *set) remove(v uintptr) (exists bool) { - x := *s - if len(x) == 0 { +func checkOverflow(ui uint64, i int64, bitsize uint8) { + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize == 0 { return } - if len(x) == 1 { - if x[0] == v { - x[0] = 0 + if i != 0 { + if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { + decErr("Overflow int value: %v", i) } - return } - for i, j := range x { - if j == v { - exists = true - x[i] = 0 // set it to 0, as way to delete it. 
- // copy(x[i:], x[i+1:]) - // x = x[:len(x)-1] - return + if ui != 0 { + if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { + decErr("Overflow uint value: %v", ui) } } - return -} - -// ------ - -// bitset types are better than [256]bool, because they permit the whole -// bitset array being on a single cache line and use less memory. -// -// Also, since pos is a byte (0-255), there's no bounds checks on indexing (cheap). -// -// We previously had bitset128 [16]byte, and bitset32 [4]byte, but those introduces -// bounds checking, so we discarded them, and everyone uses bitset256. -// -// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1). -// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7 - -type bitset256 [32]byte - -func (x *bitset256) isset(pos byte) bool { - return x[pos>>3]&(1<<(pos&7)) != 0 -} - -// func (x *bitset256) issetv(pos byte) byte { -// return x[pos>>3] & (1 << (pos & 7)) -// } - -func (x *bitset256) set(pos byte) { - x[pos>>3] |= (1 << (pos & 7)) -} - -// func (x *bitset256) unset(pos byte) { -// x[pos>>3] &^= (1 << (pos & 7)) -// } - -// type bit2set256 [64]byte - -// func (x *bit2set256) set(pos byte, v1, v2 bool) { -// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6 -// if v1 { -// x[pos>>2] |= 1 << (pos2 + 1) -// } -// if v2 { -// x[pos>>2] |= 1 << pos2 -// } -// } -// func (x *bit2set256) get(pos byte) uint8 { -// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6 -// return x[pos>>2] << (6 - pos2) >> 6 // 11000000 -> 00000011 -// } - -// ------------ - -type pooler struct { - // function-scoped pooled resources - tiload sync.Pool // for type info loading - sfiRv8, sfiRv16, sfiRv32, sfiRv64, sfiRv128 sync.Pool // for struct encoding - - // lifetime-scoped pooled resources - // dn sync.Pool // for decNaked - buf1k, buf2k, buf4k, buf8k, buf16k, buf32k, buf64k sync.Pool // for [N]byte -} - -func (p *pooler) init() { - p.tiload.New = func() interface{} { return new(typeInfoLoadArray) } - - p.sfiRv8.New = func() interface{} { return new([8]sfiRv) } - p.sfiRv16.New = func() interface{} { return new([16]sfiRv) } - p.sfiRv32.New = func() interface{} { return new([32]sfiRv) } - p.sfiRv64.New = func() interface{} { return new([64]sfiRv) } - p.sfiRv128.New = func() interface{} { return new([128]sfiRv) } - - // p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x } - - p.buf1k.New = func() interface{} { return new([1 * 1024]byte) } - p.buf2k.New = func() interface{} { return new([2 * 1024]byte) } - p.buf4k.New = func() interface{} { return new([4 * 1024]byte) } - p.buf8k.New = func() interface{} { return new([8 * 1024]byte) } - p.buf16k.New = func() interface{} { return new([16 * 1024]byte) } - p.buf32k.New = func() interface{} { return new([32 * 1024]byte) } - p.buf64k.New = func() interface{} { return new([64 * 1024]byte) } - -} - -// func (p *pooler) sfiRv8() (sp *sync.Pool, v interface{}) { -// return &p.strRv8, p.strRv8.Get() -// } -// func (p *pooler) sfiRv16() (sp *sync.Pool, v interface{}) { -// return &p.strRv16, p.strRv16.Get() -// } -// func (p *pooler) sfiRv32() (sp *sync.Pool, v interface{}) { -// return &p.strRv32, p.strRv32.Get() -// } -// func (p *pooler) sfiRv64() (sp *sync.Pool, v interface{}) { -// return &p.strRv64, p.strRv64.Get() -// } -// func (p *pooler) sfiRv128() (sp *sync.Pool, v interface{}) { -// return &p.strRv128, p.strRv128.Get() -// } - -// func (p *pooler) bytes1k() (sp *sync.Pool, v interface{}) { -// return &p.buf1k, 
p.buf1k.Get() -// } -// func (p *pooler) bytes2k() (sp *sync.Pool, v interface{}) { -// return &p.buf2k, p.buf2k.Get() -// } -// func (p *pooler) bytes4k() (sp *sync.Pool, v interface{}) { -// return &p.buf4k, p.buf4k.Get() -// } -// func (p *pooler) bytes8k() (sp *sync.Pool, v interface{}) { -// return &p.buf8k, p.buf8k.Get() -// } -// func (p *pooler) bytes16k() (sp *sync.Pool, v interface{}) { -// return &p.buf16k, p.buf16k.Get() -// } -// func (p *pooler) bytes32k() (sp *sync.Pool, v interface{}) { -// return &p.buf32k, p.buf32k.Get() -// } -// func (p *pooler) bytes64k() (sp *sync.Pool, v interface{}) { -// return &p.buf64k, p.buf64k.Get() -// } - -// func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) { -// return &p.tiload, p.tiload.Get() -// } - -// func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) { -// return &p.dn, p.dn.Get() -// } - -// func (p *pooler) decNaked() (v *decNaked, f func(*decNaked) ) { -// sp := &(p.dn) -// vv := sp.Get() -// return vv.(*decNaked), func(x *decNaked) { sp.Put(vv) } -// } -// func (p *pooler) decNakedGet() (v interface{}) { -// return p.dn.Get() -// } -// func (p *pooler) tiLoadGet() (v interface{}) { -// return p.tiload.Get() -// } -// func (p *pooler) decNakedPut(v interface{}) { -// p.dn.Put(v) -// } -// func (p *pooler) tiLoadPut(v interface{}) { -// p.tiload.Put(v) -// } - -// ---------------------------------------------------- - -type panicHdl struct{} - -func (panicHdl) errorv(err error) { - if err != nil { - panic(err) - } -} - -func (panicHdl) errorstr(message string) { - if message != "" { - panic(message) - } -} - -func (panicHdl) errorf(format string, params ...interface{}) { - if format == "" { - } else if len(params) == 0 { - panic(format) - } else { - panic(fmt.Sprintf(format, params...)) - } -} - -// ---------------------------------------------------- - -type errDecorator interface { - wrapErr(in interface{}, out *error) -} - -type errDecoratorDef struct{} - -func (errDecoratorDef) wrapErr(v interface{}, e *error) { *e = fmt.Errorf("%v", v) } - -// ---------------------------------------------------- - -type must struct{} - -func (must) String(s string, err error) string { - if err != nil { - panicv.errorv(err) - } - return s -} -func (must) Int(s int64, err error) int64 { - if err != nil { - panicv.errorv(err) - } - return s -} -func (must) Uint(s uint64, err error) uint64 { - if err != nil { - panicv.errorv(err) - } - return s } -func (must) Float(s float64, err error) float64 { - if err != nil { - panicv.errorv(err) - } - return s -} - -// ------------------- - -type bytesBufPooler struct { - pool *sync.Pool - poolbuf interface{} -} - -func (z *bytesBufPooler) end() { - if z.pool != nil { - z.pool.Put(z.poolbuf) - z.pool, z.poolbuf = nil, nil - } -} - -func (z *bytesBufPooler) get(bufsize int) (buf []byte) { - // ensure an end is called first (if necessary) - if z.pool != nil { - z.pool.Put(z.poolbuf) - z.pool, z.poolbuf = nil, nil - } - - // // Try to use binary search. 
- // // This is not optimal, as most folks select 1k or 2k buffers - // // so a linear search is better (sequence of if/else blocks) - // if bufsize < 1 { - // bufsize = 0 - // } else { - // bufsize-- - // bufsize /= 1024 - // } - // switch bufsize { - // case 0: - // z.pool, z.poolbuf = pool.bytes1k() - // buf = z.poolbuf.(*[1 * 1024]byte)[:] - // case 1: - // z.pool, z.poolbuf = pool.bytes2k() - // buf = z.poolbuf.(*[2 * 1024]byte)[:] - // case 2, 3: - // z.pool, z.poolbuf = pool.bytes4k() - // buf = z.poolbuf.(*[4 * 1024]byte)[:] - // case 4, 5, 6, 7: - // z.pool, z.poolbuf = pool.bytes8k() - // buf = z.poolbuf.(*[8 * 1024]byte)[:] - // case 8, 9, 10, 11, 12, 13, 14, 15: - // z.pool, z.poolbuf = pool.bytes16k() - // buf = z.poolbuf.(*[16 * 1024]byte)[:] - // case 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31: - // z.pool, z.poolbuf = pool.bytes32k() - // buf = z.poolbuf.(*[32 * 1024]byte)[:] - // default: - // z.pool, z.poolbuf = pool.bytes64k() - // buf = z.poolbuf.(*[64 * 1024]byte)[:] - // } - // return - - if bufsize <= 1*1024 { - z.pool, z.poolbuf = &pool.buf1k, pool.buf1k.Get() // pool.bytes1k() - buf = z.poolbuf.(*[1 * 1024]byte)[:] - } else if bufsize <= 2*1024 { - z.pool, z.poolbuf = &pool.buf2k, pool.buf2k.Get() // pool.bytes2k() - buf = z.poolbuf.(*[2 * 1024]byte)[:] - } else if bufsize <= 4*1024 { - z.pool, z.poolbuf = &pool.buf4k, pool.buf4k.Get() // pool.bytes4k() - buf = z.poolbuf.(*[4 * 1024]byte)[:] - } else if bufsize <= 8*1024 { - z.pool, z.poolbuf = &pool.buf8k, pool.buf8k.Get() // pool.bytes8k() - buf = z.poolbuf.(*[8 * 1024]byte)[:] - } else if bufsize <= 16*1024 { - z.pool, z.poolbuf = &pool.buf16k, pool.buf16k.Get() // pool.bytes16k() - buf = z.poolbuf.(*[16 * 1024]byte)[:] - } else if bufsize <= 32*1024 { - z.pool, z.poolbuf = &pool.buf32k, pool.buf32k.Get() // pool.bytes32k() - buf = z.poolbuf.(*[32 * 1024]byte)[:] - } else { - z.pool, z.poolbuf = &pool.buf64k, pool.buf64k.Get() // pool.bytes64k() - buf = z.poolbuf.(*[64 * 1024]byte)[:] - } - return -} - -// ---------------- - -type sfiRvPooler struct { - pool *sync.Pool - poolv interface{} -} - -func (z *sfiRvPooler) end() { - if z.pool != nil { - z.pool.Put(z.poolv) - z.pool, z.poolv = nil, nil - } -} - -func (z *sfiRvPooler) get(newlen int) (fkvs []sfiRv) { - if newlen < 0 { // bounds-check-elimination - // cannot happen // here for bounds-check-elimination - } else if newlen <= 8 { - z.pool, z.poolv = &pool.sfiRv8, pool.sfiRv8.Get() // pool.sfiRv8() - fkvs = z.poolv.(*[8]sfiRv)[:newlen] - } else if newlen <= 16 { - z.pool, z.poolv = &pool.sfiRv16, pool.sfiRv16.Get() // pool.sfiRv16() - fkvs = z.poolv.(*[16]sfiRv)[:newlen] - } else if newlen <= 32 { - z.pool, z.poolv = &pool.sfiRv32, pool.sfiRv32.Get() // pool.sfiRv32() - fkvs = z.poolv.(*[32]sfiRv)[:newlen] - } else if newlen <= 64 { - z.pool, z.poolv = &pool.sfiRv64, pool.sfiRv64.Get() // pool.sfiRv64() - fkvs = z.poolv.(*[64]sfiRv)[:newlen] - } else if newlen <= 128 { - z.pool, z.poolv = &pool.sfiRv128, pool.sfiRv128.Get() // pool.sfiRv128() - fkvs = z.poolv.(*[128]sfiRv)[:newlen] - } else { - fkvs = make([]sfiRv, newlen) - } - return -} - -// xdebugf printf. the message in red on the terminal. -// Use it in place of fmt.Printf (which it calls internally) -func xdebugf(pattern string, args ...interface{}) { - var delim string - if len(pattern) > 0 && pattern[len(pattern)-1] != '\n' { - delim = "\n" - } - fmt.Printf("\033[1;31m"+pattern+delim+"\033[0m", args...) 
-} - -// func isImmutableKind(k reflect.Kind) (v bool) { -// return false || -// k == reflect.Int || -// k == reflect.Int8 || -// k == reflect.Int16 || -// k == reflect.Int32 || -// k == reflect.Int64 || -// k == reflect.Uint || -// k == reflect.Uint8 || -// k == reflect.Uint16 || -// k == reflect.Uint32 || -// k == reflect.Uint64 || -// k == reflect.Uintptr || -// k == reflect.Float32 || -// k == reflect.Float64 || -// k == reflect.Bool || -// k == reflect.String -// } - -// func timeLocUTCName(tzint int16) string { -// if tzint == 0 { -// return "UTC" -// } -// var tzname = []byte("UTC+00:00") -// //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf.. inline below. -// //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first -// var tzhr, tzmin int16 -// if tzint < 0 { -// tzname[3] = '-' // (TODO: verify. this works here) -// tzhr, tzmin = -tzint/60, (-tzint)%60 -// } else { -// tzhr, tzmin = tzint/60, tzint%60 -// } -// tzname[4] = timeDigits[tzhr/10] -// tzname[5] = timeDigits[tzhr%10] -// tzname[7] = timeDigits[tzmin/10] -// tzname[8] = timeDigits[tzmin%10] -// return string(tzname) -// //return time.FixedZone(string(tzname), int(tzint)*60) -// } diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go index 0cbd665e..93f12854 100644 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go +++ b/src/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go @@ -1,121 +1,132 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. package codec // All non-std package dependencies live in this file, // so porting to different environment is easy (just update functions). -func pruneSignExt(v []byte, pos bool) (n int) { - if len(v) < 2 { - } else if pos && v[0] == 0 { - for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { - } - } else if !pos && v[0] == 0xff { - for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { - } +import ( + "errors" + "fmt" + "math" + "reflect" +) + +var ( + raisePanicAfterRecover = false + debugging = true +) + +func panicValToErr(panicVal interface{}, err *error) { + switch xerr := panicVal.(type) { + case error: + *err = xerr + case string: + *err = errors.New(xerr) + default: + *err = fmt.Errorf("%v", panicVal) + } + if raisePanicAfterRecover { + panic(panicVal) } return } -// validate that this function is correct ... 
-// culled from OGRE (Object-Oriented Graphics Rendering Engine) -// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html) -func halfFloatToFloatBits(yy uint16) (d uint32) { - y := uint32(yy) - s := (y >> 15) & 0x01 - e := (y >> 10) & 0x1f - m := y & 0x03ff - - if e == 0 { - if m == 0 { // plu or minus 0 - return s << 31 +func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool { + switch v.Kind() { + case reflect.Invalid: + return true + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if deref { + if v.IsNil() { + return true + } + return hIsEmptyValue(v.Elem(), deref, checkStruct) + } else { + return v.IsNil() } - // Denormalized number -- renormalize it - for (m & 0x00000400) == 0 { - m <<= 1 - e -= 1 + case reflect.Struct: + if !checkStruct { + return false } - e += 1 - const zz uint32 = 0x0400 - m &= ^zz - } else if e == 31 { - if m == 0 { // Inf - return (s << 31) | 0x7f800000 + // return true if all fields are empty. else return false. + + // we cannot use equality check, because some fields may be maps/slices/etc + // and consequently the structs are not comparable. + // return v.Interface() == reflect.Zero(v.Type()).Interface() + for i, n := 0, v.NumField(); i < n; i++ { + if !hIsEmptyValue(v.Field(i), deref, checkStruct) { + return false + } } - return (s << 31) | 0x7f800000 | (m << 13) // NaN + return true } - e = e + (127 - 15) - m = m << 13 - return (s << 31) | (e << 23) | m + return false } -// GrowCap will return a new capacity for a slice, given the following: -// - oldCap: current capacity -// - unit: in-memory size of an element -// - num: number of elements to add -func growCap(oldCap, unit, num int) (newCap int) { - // appendslice logic (if cap < 1024, *2, else *1.25): - // leads to many copy calls, especially when copying bytes. - // bytes.Buffer model (2*cap + n): much better for bytes. - // smarter way is to take the byte-size of the appended element(type) into account - - // maintain 3 thresholds: - // t1: if cap <= t1, newcap = 2x - // t2: if cap <= t2, newcap = 1.75x - // t3: if cap <= t3, newcap = 1.5x - // else newcap = 1.25x - // - // t1, t2, t3 >= 1024 always. - // i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same) - // - // With this, appending for bytes increase by: - // 100% up to 4K - // 75% up to 8K - // 50% up to 16K - // 25% beyond that +func isEmptyValue(v reflect.Value) bool { + return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue) +} - // unit can be 0 e.g. for struct{}{}; handle that appropriately - var t1, t2, t3 int // thresholds - if unit <= 1 { - t1, t2, t3 = 4*1024, 8*1024, 16*1024 - } else if unit < 16 { - t3 = 16 / unit * 1024 - t1 = t3 * 1 / 4 - t2 = t3 * 2 / 4 - } else { - t1, t2, t3 = 1024, 1024, 1024 +func debugf(format string, args ...interface{}) { + if debugging { + if len(format) == 0 || format[len(format)-1] != '\n' { + format = format + "\n" + } + fmt.Printf(format, args...) 
} +} - var x int // temporary variable - - // x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively - if oldCap <= t1 { // [0,t1] - x = 8 - } else if oldCap > t3 { // (t3,infinity] - x = 5 - } else if oldCap <= t2 { // (t1,t2] - x = 7 - } else { // (t2,t3] - x = 6 +func pruneSignExt(v []byte, pos bool) (n int) { + if len(v) < 2 { + } else if pos && v[0] == 0 { + for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { + } + } else if !pos && v[0] == 0xff { + for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { + } } - newCap = x * oldCap / 4 + return +} - if num > 0 { - newCap += num +func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { + if typ == nil { + return } - - // ensure newCap is a multiple of 64 (if it is > 64) or 16. - if newCap > 64 { - if x = newCap % 64; x != 0 { - x = newCap / 64 - newCap = 64 * (x + 1) + rt := typ + // The type might be a pointer and we need to keep + // dereferencing to the base type until we find an implementation. + for { + if rt.Implements(iTyp) { + return true, indir } - } else { - if x = newCap % 16; x != 0 { - x = newCap / 16 - newCap = 16 * (x + 1) + if p := rt; p.Kind() == reflect.Ptr { + indir++ + if indir >= math.MaxInt8 { // insane number of indirections + return false, 0 + } + rt = p.Elem() + continue } + break } - return + // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. + if typ.Kind() != reflect.Ptr { + // Not a pointer, but does the pointer work? + if reflect.PtrTo(typ).Implements(iTyp) { + return true, -1 + } + } + return false, 0 } diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/helper_not_unsafe.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/helper_not_unsafe.go deleted file mode 100644 index 74987f9f..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/helper_not_unsafe.go +++ /dev/null @@ -1,331 +0,0 @@ -// +build !go1.7 safe appengine - -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "reflect" - "sync/atomic" - "time" -) - -const safeMode = true - -// stringView returns a view of the []byte as a string. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. -// In regular safe mode, it is an allocation and copy. -// -// Usage: Always maintain a reference to v while result of this call is in use, -// and call keepAlive4BytesView(v) at point where done with view. -func stringView(v []byte) string { - return string(v) -} - -// bytesView returns a view of the string as a []byte. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. -// In regular safe mode, it is an allocation and copy. -// -// Usage: Always maintain a reference to v while result of this call is in use, -// and call keepAlive4BytesView(v) at point where done with view. -func bytesView(v string) []byte { - return []byte(v) -} - -func definitelyNil(v interface{}) bool { - // this is a best-effort option. - // We just return false, so we don't unnecessarily incur the cost of reflection this early. 
- return false -} - -func rv2i(rv reflect.Value) interface{} { - return rv.Interface() -} - -func rt2id(rt reflect.Type) uintptr { - return reflect.ValueOf(rt).Pointer() -} - -// func rv2rtid(rv reflect.Value) uintptr { -// return reflect.ValueOf(rv.Type()).Pointer() -// } - -func i2rtid(i interface{}) uintptr { - return reflect.ValueOf(reflect.TypeOf(i)).Pointer() -} - -// -------------------------- - -func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool { - switch v.Kind() { - case reflect.Invalid: - return true - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if deref { - if v.IsNil() { - return true - } - return isEmptyValue(v.Elem(), tinfos, deref, checkStruct) - } - return v.IsNil() - case reflect.Struct: - return isEmptyStruct(v, tinfos, deref, checkStruct) - } - return false -} - -// -------------------------- -// type ptrToRvMap struct{} - -// func (*ptrToRvMap) init() {} -// func (*ptrToRvMap) get(i interface{}) reflect.Value { -// return reflect.ValueOf(i).Elem() -// } - -// -------------------------- -type atomicClsErr struct { - v atomic.Value -} - -func (x *atomicClsErr) load() (e clsErr) { - if i := x.v.Load(); i != nil { - e = i.(clsErr) - } - return -} - -func (x *atomicClsErr) store(p clsErr) { - x.v.Store(p) -} - -// -------------------------- -type atomicTypeInfoSlice struct { // expected to be 2 words - v atomic.Value -} - -func (x *atomicTypeInfoSlice) load() (e []rtid2ti) { - if i := x.v.Load(); i != nil { - e = i.([]rtid2ti) - } - return -} - -func (x *atomicTypeInfoSlice) store(p []rtid2ti) { - x.v.Store(p) -} - -// -------------------------- -type atomicRtidFnSlice struct { // expected to be 2 words - v atomic.Value -} - -func (x *atomicRtidFnSlice) load() (e []codecRtidFn) { - if i := x.v.Load(); i != nil { - e = i.([]codecRtidFn) - } - return -} - -func (x *atomicRtidFnSlice) store(p []codecRtidFn) { - x.v.Store(p) -} - -// -------------------------- -func (n *decNaked) ru() reflect.Value { - return reflect.ValueOf(&n.u).Elem() -} -func (n *decNaked) ri() reflect.Value { - return reflect.ValueOf(&n.i).Elem() -} -func (n *decNaked) rf() reflect.Value { - return reflect.ValueOf(&n.f).Elem() -} -func (n *decNaked) rl() reflect.Value { - return reflect.ValueOf(&n.l).Elem() -} -func (n *decNaked) rs() reflect.Value { - return reflect.ValueOf(&n.s).Elem() -} -func (n *decNaked) rt() reflect.Value { - return reflect.ValueOf(&n.t).Elem() -} -func (n *decNaked) rb() reflect.Value { - return reflect.ValueOf(&n.b).Elem() -} - -// -------------------------- -func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { - rv.SetBytes(d.rawBytes()) -} - -func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { - rv.SetString(d.d.DecodeString()) -} - -func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { - rv.SetBool(d.d.DecodeBool()) -} - -func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) { - rv.Set(reflect.ValueOf(d.d.DecodeTime())) -} - -func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { - fv := d.d.DecodeFloat64() - if chkOvf.Float32(fv) { - d.errorf("float32 overflow: %v", fv) - } - rv.SetFloat(fv) -} - -func (d 
*Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { - rv.SetFloat(d.d.DecodeFloat64()) -} - -func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { - rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) -} - -func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { - rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 8)) -} - -func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { - rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 16)) -} - -func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { - rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 32)) -} - -func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { - rv.SetInt(d.d.DecodeInt64()) -} - -func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { - rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) -} - -func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { - rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) -} - -func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { - rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 8)) -} - -func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { - rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 16)) -} - -func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { - rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 32)) -} - -func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { - rv.SetUint(d.d.DecodeUint64()) -} - -// ---------------- - -func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeBool(rv.Bool()) -} - -func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeTime(rv2i(rv).(time.Time)) -} - -func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) { - s := rv.String() - if e.h.StringToRaw { - e.e.EncodeStringBytesRaw(bytesView(s)) - } else { - e.e.EncodeStringEnc(cUTF8, s) - } -} - -func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeFloat64(rv.Float()) -} - -func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeFloat32(float32(rv.Float())) -} - -func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeInt(rv.Int()) -} - -func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeInt(rv.Int()) -} - -func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeInt(rv.Int()) -} - -func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeInt(rv.Int()) -} - -func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeInt(rv.Int()) -} - -func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeUint(rv.Uint()) -} - -func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeUint(rv.Uint()) -} - -func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeUint(rv.Uint()) -} - -func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeUint(rv.Uint()) -} - -func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeUint(rv.Uint()) -} - -func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeUint(rv.Uint()) -} - -// // keepAlive4BytesView maintains a reference to the input parameter for bytesView. -// // -// // Usage: call this at point where done with the bytes view. -// func keepAlive4BytesView(v string) {} - -// // keepAlive4BytesView maintains a reference to the input parameter for stringView. -// // -// // Usage: call this at point where done with the string view. 
-// func keepAlive4StringView(v []byte) {} - -// func definitelyNil(v interface{}) bool { -// rv := reflect.ValueOf(v) -// switch rv.Kind() { -// case reflect.Invalid: -// return true -// case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Slice, reflect.Map, reflect.Func: -// return rv.IsNil() -// default: -// return false -// } -// } diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/helper_unsafe.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/helper_unsafe.go deleted file mode 100644 index 3bc34d90..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/helper_unsafe.go +++ /dev/null @@ -1,745 +0,0 @@ -// +build !safe -// +build !appengine -// +build go1.7 - -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "reflect" - "sync/atomic" - "time" - "unsafe" -) - -// This file has unsafe variants of some helper methods. -// NOTE: See helper_not_unsafe.go for the usage information. - -// var zeroRTv [4]uintptr - -const safeMode = false -const unsafeFlagIndir = 1 << 7 // keep in sync with GO_ROOT/src/reflect/value.go - -type unsafeString struct { - Data unsafe.Pointer - Len int -} - -type unsafeSlice struct { - Data unsafe.Pointer - Len int - Cap int -} - -type unsafeIntf struct { - typ unsafe.Pointer - word unsafe.Pointer -} - -type unsafeReflectValue struct { - typ unsafe.Pointer - ptr unsafe.Pointer - flag uintptr -} - -func stringView(v []byte) string { - if len(v) == 0 { - return "" - } - bx := (*unsafeSlice)(unsafe.Pointer(&v)) - return *(*string)(unsafe.Pointer(&unsafeString{bx.Data, bx.Len})) -} - -func bytesView(v string) []byte { - if len(v) == 0 { - return zeroByteSlice - } - sx := (*unsafeString)(unsafe.Pointer(&v)) - return *(*[]byte)(unsafe.Pointer(&unsafeSlice{sx.Data, sx.Len, sx.Len})) -} - -func definitelyNil(v interface{}) bool { - // There is no global way of checking if an interface is nil. - // For true references (map, ptr, func, chan), you can just look - // at the word of the interface. However, for slices, you have to dereference - // the word, and get a pointer to the 3-word interface value. - // - // However, the following are cheap calls - // - TypeOf(interface): cheap 2-line call. - // - ValueOf(interface{}): expensive - // - type.Kind: cheap call through an interface - // - Value.Type(): cheap call - // except it's a method value (e.g. r.Read, which implies that it is a Func) - - return ((*unsafeIntf)(unsafe.Pointer(&v))).word == nil -} - -func rv2i(rv reflect.Value) interface{} { - // TODO: consider a more generally-known optimization for reflect.Value ==> Interface - // - // Currently, we use this fragile method that taps into implememtation details from - // the source go stdlib reflect/value.go, and trims the implementation. 
- - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - // true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir - var ptr unsafe.Pointer - if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 { - ptr = *(*unsafe.Pointer)(urv.ptr) - } else { - ptr = urv.ptr - } - return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr})) -} - -func rt2id(rt reflect.Type) uintptr { - return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word) -} - -// func rv2rtid(rv reflect.Value) uintptr { -// return uintptr((*unsafeReflectValue)(unsafe.Pointer(&rv)).typ) -// } - -func i2rtid(i interface{}) uintptr { - return uintptr(((*unsafeIntf)(unsafe.Pointer(&i))).typ) -} - -// -------------------------- - -func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool { - urv := (*unsafeReflectValue)(unsafe.Pointer(&v)) - if urv.flag == 0 { - return true - } - switch v.Kind() { - case reflect.Invalid: - return true - case reflect.String: - return (*unsafeString)(urv.ptr).Len == 0 - case reflect.Slice: - return (*unsafeSlice)(urv.ptr).Len == 0 - case reflect.Bool: - return !*(*bool)(urv.ptr) - case reflect.Int: - return *(*int)(urv.ptr) == 0 - case reflect.Int8: - return *(*int8)(urv.ptr) == 0 - case reflect.Int16: - return *(*int16)(urv.ptr) == 0 - case reflect.Int32: - return *(*int32)(urv.ptr) == 0 - case reflect.Int64: - return *(*int64)(urv.ptr) == 0 - case reflect.Uint: - return *(*uint)(urv.ptr) == 0 - case reflect.Uint8: - return *(*uint8)(urv.ptr) == 0 - case reflect.Uint16: - return *(*uint16)(urv.ptr) == 0 - case reflect.Uint32: - return *(*uint32)(urv.ptr) == 0 - case reflect.Uint64: - return *(*uint64)(urv.ptr) == 0 - case reflect.Uintptr: - return *(*uintptr)(urv.ptr) == 0 - case reflect.Float32: - return *(*float32)(urv.ptr) == 0 - case reflect.Float64: - return *(*float64)(urv.ptr) == 0 - case reflect.Interface: - isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil - if deref { - if isnil { - return true - } - return isEmptyValue(v.Elem(), tinfos, deref, checkStruct) - } - return isnil - case reflect.Ptr: - // isnil := urv.ptr == nil (not sufficient, as a pointer value encodes the type) - isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil - if deref { - if isnil { - return true - } - return isEmptyValue(v.Elem(), tinfos, deref, checkStruct) - } - return isnil - case reflect.Struct: - return isEmptyStruct(v, tinfos, deref, checkStruct) - case reflect.Map, reflect.Array, reflect.Chan: - return v.Len() == 0 - } - return false -} - -// -------------------------- - -// atomicXXX is expected to be 2 words (for symmetry with atomic.Value) -// -// Note that we do not atomically load/store length and data pointer separately, -// as this could lead to some races. Instead, we atomically load/store cappedSlice. -// -// Note: with atomic.(Load|Store)Pointer, we MUST work with an unsafe.Pointer directly. 
- -// ---------------------- -type atomicTypeInfoSlice struct { - v unsafe.Pointer // *[]rtid2ti - _ uintptr // padding (atomicXXX expected to be 2 words) -} - -func (x *atomicTypeInfoSlice) load() (s []rtid2ti) { - x2 := atomic.LoadPointer(&x.v) - if x2 != nil { - s = *(*[]rtid2ti)(x2) - } - return -} - -func (x *atomicTypeInfoSlice) store(p []rtid2ti) { - atomic.StorePointer(&x.v, unsafe.Pointer(&p)) -} - -// -------------------------- -type atomicRtidFnSlice struct { - v unsafe.Pointer // *[]codecRtidFn - _ uintptr // padding (atomicXXX expected to be 2 words) -} - -func (x *atomicRtidFnSlice) load() (s []codecRtidFn) { - x2 := atomic.LoadPointer(&x.v) - if x2 != nil { - s = *(*[]codecRtidFn)(x2) - } - return -} - -func (x *atomicRtidFnSlice) store(p []codecRtidFn) { - atomic.StorePointer(&x.v, unsafe.Pointer(&p)) -} - -// -------------------------- -type atomicClsErr struct { - v unsafe.Pointer // *clsErr - _ uintptr // padding (atomicXXX expected to be 2 words) -} - -func (x *atomicClsErr) load() (e clsErr) { - x2 := (*clsErr)(atomic.LoadPointer(&x.v)) - if x2 != nil { - e = *x2 - } - return -} - -func (x *atomicClsErr) store(p clsErr) { - atomic.StorePointer(&x.v, unsafe.Pointer(&p)) -} - -// -------------------------- - -// to create a reflect.Value for each member field of decNaked, -// we first create a global decNaked, and create reflect.Value -// for them all. -// This way, we have the flags and type in the reflect.Value. -// Then, when a reflect.Value is called, we just copy it, -// update the ptr to the decNaked's, and return it. - -type unsafeDecNakedWrapper struct { - decNaked - ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above -} - -func (n *unsafeDecNakedWrapper) init() { - n.ru = reflect.ValueOf(&n.u).Elem() - n.ri = reflect.ValueOf(&n.i).Elem() - n.rf = reflect.ValueOf(&n.f).Elem() - n.rl = reflect.ValueOf(&n.l).Elem() - n.rs = reflect.ValueOf(&n.s).Elem() - n.rt = reflect.ValueOf(&n.t).Elem() - n.rb = reflect.ValueOf(&n.b).Elem() - // n.rr[] = reflect.ValueOf(&n.) 
-} - -var defUnsafeDecNakedWrapper unsafeDecNakedWrapper - -func init() { - defUnsafeDecNakedWrapper.init() -} - -func (n *decNaked) ru() (v reflect.Value) { - v = defUnsafeDecNakedWrapper.ru - ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.u) - return -} -func (n *decNaked) ri() (v reflect.Value) { - v = defUnsafeDecNakedWrapper.ri - ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.i) - return -} -func (n *decNaked) rf() (v reflect.Value) { - v = defUnsafeDecNakedWrapper.rf - ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.f) - return -} -func (n *decNaked) rl() (v reflect.Value) { - v = defUnsafeDecNakedWrapper.rl - ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.l) - return -} -func (n *decNaked) rs() (v reflect.Value) { - v = defUnsafeDecNakedWrapper.rs - ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.s) - return -} -func (n *decNaked) rt() (v reflect.Value) { - v = defUnsafeDecNakedWrapper.rt - ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.t) - return -} -func (n *decNaked) rb() (v reflect.Value) { - v = defUnsafeDecNakedWrapper.rb - ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.b) - return -} - -// -------------------------- -func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*[]byte)(urv.ptr) = d.rawBytes() -} - -func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*string)(urv.ptr) = d.d.DecodeString() -} - -func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*bool)(urv.ptr) = d.d.DecodeBool() -} - -func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*time.Time)(urv.ptr) = d.d.DecodeTime() -} - -func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { - fv := d.d.DecodeFloat64() - if chkOvf.Float32(fv) { - d.errorf("float32 overflow: %v", fv) - } - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*float32)(urv.ptr) = float32(fv) -} - -func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*float64)(urv.ptr) = d.d.DecodeFloat64() -} - -func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*int)(urv.ptr) = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) -} - -func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*int8)(urv.ptr) = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) -} - -func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*int16)(urv.ptr) = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) -} - -func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*int32)(urv.ptr) = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) -} - -func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*int64)(urv.ptr) = d.d.DecodeInt64() -} - -func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*uint)(urv.ptr) = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) -} - -func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - 
*(*uintptr)(urv.ptr) = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) -} - -func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*uint8)(urv.ptr) = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) -} - -func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*uint16)(urv.ptr) = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) -} - -func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*uint32)(urv.ptr) = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) -} - -func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*uint64)(urv.ptr) = d.d.DecodeUint64() -} - -// ------------ - -func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - e.e.EncodeBool(*(*bool)(v.ptr)) -} - -func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - e.e.EncodeTime(*(*time.Time)(v.ptr)) -} - -func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - s := *(*string)(v.ptr) - if e.h.StringToRaw { - e.e.EncodeStringBytesRaw(bytesView(s)) - } else { - e.e.EncodeStringEnc(cUTF8, s) - } -} - -func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - e.e.EncodeFloat64(*(*float64)(v.ptr)) -} - -func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - e.e.EncodeFloat32(*(*float32)(v.ptr)) -} - -func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - e.e.EncodeInt(int64(*(*int)(v.ptr))) -} - -func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - e.e.EncodeInt(int64(*(*int8)(v.ptr))) -} - -func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - e.e.EncodeInt(int64(*(*int16)(v.ptr))) -} - -func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - e.e.EncodeInt(int64(*(*int32)(v.ptr))) -} - -func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - e.e.EncodeInt(int64(*(*int64)(v.ptr))) -} - -func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - e.e.EncodeUint(uint64(*(*uint)(v.ptr))) -} - -func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - e.e.EncodeUint(uint64(*(*uint8)(v.ptr))) -} - -func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - e.e.EncodeUint(uint64(*(*uint16)(v.ptr))) -} - -func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - e.e.EncodeUint(uint64(*(*uint32)(v.ptr))) -} - -func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - e.e.EncodeUint(uint64(*(*uint64)(v.ptr))) -} - -func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - e.e.EncodeUint(uint64(*(*uintptr)(v.ptr))) -} - -// ------------ - -// func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { -// urv := 
(*unsafeReflectValue)(unsafe.Pointer(&rv)) -// // if urv.flag&unsafeFlagIndir != 0 { -// // urv.ptr = *(*unsafe.Pointer)(urv.ptr) -// // } -// *(*[]byte)(urv.ptr) = d.rawBytes() -// } - -// func rv0t(rt reflect.Type) reflect.Value { -// ut := (*unsafeIntf)(unsafe.Pointer(&rt)) -// // we need to determine whether ifaceIndir, and then whether to just pass 0 as the ptr -// uv := unsafeReflectValue{ut.word, &zeroRTv, flag(rt.Kind())} -// return *(*reflect.Value)(unsafe.Pointer(&uv}) -// } - -// func rv2i(rv reflect.Value) interface{} { -// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) -// // true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir -// var ptr unsafe.Pointer -// // kk := reflect.Kind(urv.flag & (1<<5 - 1)) -// // if (kk == reflect.Map || kk == reflect.Ptr || kk == reflect.Chan || kk == reflect.Func) && urv.flag&unsafeFlagIndir != 0 { -// if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 { -// ptr = *(*unsafe.Pointer)(urv.ptr) -// } else { -// ptr = urv.ptr -// } -// return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr})) -// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) -// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) -// } - -// func definitelyNil(v interface{}) bool { -// var ui *unsafeIntf = (*unsafeIntf)(unsafe.Pointer(&v)) -// if ui.word == nil { -// return true -// } -// var tk = reflect.TypeOf(v).Kind() -// return (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.word) == nil -// fmt.Printf(">>>> definitely nil: isnil: %v, TYPE: \t%T, word: %v, *word: %v, type: %v, nil: %v\n", -// v == nil, v, word, *((*unsafe.Pointer)(word)), ui.typ, nil) -// } - -// func keepAlive4BytesView(v string) { -// runtime.KeepAlive(v) -// } - -// func keepAlive4StringView(v []byte) { -// runtime.KeepAlive(v) -// } - -// func rt2id(rt reflect.Type) uintptr { -// return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word) -// // var i interface{} = rt -// // // ui := (*unsafeIntf)(unsafe.Pointer(&i)) -// // return ((*unsafeIntf)(unsafe.Pointer(&i))).word -// } - -// func rv2i(rv reflect.Value) interface{} { -// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) -// // non-reference type: already indir -// // reference type: depend on flagIndir property ('cos maybe was double-referenced) -// // const (unsafeRvFlagKindMask = 1<<5 - 1 , unsafeRvFlagIndir = 1 << 7 ) -// // rvk := reflect.Kind(urv.flag & (1<<5 - 1)) -// // if (rvk == reflect.Chan || -// // rvk == reflect.Func || -// // rvk == reflect.Interface || -// // rvk == reflect.Map || -// // rvk == reflect.Ptr || -// // rvk == reflect.UnsafePointer) && urv.flag&(1<<8) != 0 { -// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type()) -// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) -// // } -// if urv.flag&(1<<5-1) == uintptr(reflect.Map) && urv.flag&(1<<7) != 0 { -// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type()) -// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) -// } -// // fmt.Printf(">>>>> ++++ direct reference: %v, %v\n", rvk, rv.Type()) -// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) -// } - -// const ( -// unsafeRvFlagKindMask = 1<<5 - 1 -// unsafeRvKindDirectIface = 1 << 5 -// unsafeRvFlagIndir = 1 << 7 -// unsafeRvFlagAddr = 1 << 8 
-// unsafeRvFlagMethod = 1 << 9 - -// _USE_RV_INTERFACE bool = false -// _UNSAFE_RV_DEBUG = true -// ) - -// type unsafeRtype struct { -// _ [2]uintptr -// _ uint32 -// _ uint8 -// _ uint8 -// _ uint8 -// kind uint8 -// _ [2]uintptr -// _ int32 -// } - -// func _rv2i(rv reflect.Value) interface{} { -// // Note: From use, -// // - it's never an interface -// // - the only calls here are for ifaceIndir types. -// // (though that conditional is wrong) -// // To know for sure, we need the value of t.kind (which is not exposed). -// // -// // Need to validate the path: type is indirect ==> only value is indirect ==> default (value is direct) -// // - Type indirect, Value indirect: ==> numbers, boolean, slice, struct, array, string -// // - Type Direct, Value indirect: ==> map??? -// // - Type Direct, Value direct: ==> pointers, unsafe.Pointer, func, chan, map -// // -// // TRANSLATES TO: -// // if typeIndirect { } else if valueIndirect { } else { } -// // -// // Since we don't deal with funcs, then "flagNethod" is unset, and can be ignored. - -// if _USE_RV_INTERFACE { -// return rv.Interface() -// } -// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - -// // if urv.flag&unsafeRvFlagMethod != 0 || urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) { -// // println("***** IS flag method or interface: delegating to rv.Interface()") -// // return rv.Interface() -// // } - -// // if urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) { -// // println("***** IS Interface: delegate to rv.Interface") -// // return rv.Interface() -// // } -// // if urv.flag&unsafeRvFlagKindMask&unsafeRvKindDirectIface == 0 { -// // if urv.flag&unsafeRvFlagAddr == 0 { -// // println("***** IS ifaceIndir typ") -// // // ui := unsafeIntf{word: urv.ptr, typ: urv.typ} -// // // return *(*interface{})(unsafe.Pointer(&ui)) -// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) -// // } -// // } else if urv.flag&unsafeRvFlagIndir != 0 { -// // println("***** IS flagindir") -// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) -// // } else { -// // println("***** NOT flagindir") -// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) -// // } -// // println("***** default: delegate to rv.Interface") - -// urt := (*unsafeRtype)(unsafe.Pointer(urv.typ)) -// if _UNSAFE_RV_DEBUG { -// fmt.Printf(">>>> start: %v: ", rv.Type()) -// fmt.Printf("%v - %v\n", *urv, *urt) -// } -// if urt.kind&unsafeRvKindDirectIface == 0 { -// if _UNSAFE_RV_DEBUG { -// fmt.Printf("**** +ifaceIndir type: %v\n", rv.Type()) -// } -// // println("***** IS ifaceIndir typ") -// // if true || urv.flag&unsafeRvFlagAddr == 0 { -// // // println(" ***** IS NOT addr") -// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) -// // } -// } else if urv.flag&unsafeRvFlagIndir != 0 { -// if _UNSAFE_RV_DEBUG { -// fmt.Printf("**** +flagIndir type: %v\n", rv.Type()) -// } -// // println("***** IS flagindir") -// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) -// } else { -// if _UNSAFE_RV_DEBUG { -// fmt.Printf("**** -flagIndir type: %v\n", rv.Type()) -// } -// // println("***** NOT flagindir") -// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) -// } -// // println("***** default: delegating to rv.Interface()") -// // return rv.Interface() -// } - -// var staticM0 = make(map[string]uint64) -// var staticI0 = 
(int32)(-5) - -// func staticRv2iTest() { -// i0 := (int32)(-5) -// m0 := make(map[string]uint16) -// m0["1"] = 1 -// for _, i := range []interface{}{ -// (int)(7), -// (uint)(8), -// (int16)(-9), -// (uint16)(19), -// (uintptr)(77), -// (bool)(true), -// float32(-32.7), -// float64(64.9), -// complex(float32(19), 5), -// complex(float64(-32), 7), -// [4]uint64{1, 2, 3, 4}, -// (chan<- int)(nil), // chan, -// rv2i, // func -// io.Writer(ioutil.Discard), -// make(map[string]uint), -// (map[string]uint)(nil), -// staticM0, -// m0, -// &m0, -// i0, -// &i0, -// &staticI0, -// &staticM0, -// []uint32{6, 7, 8}, -// "abc", -// Raw{}, -// RawExt{}, -// &Raw{}, -// &RawExt{}, -// unsafe.Pointer(&i0), -// } { -// i2 := rv2i(reflect.ValueOf(i)) -// eq := reflect.DeepEqual(i, i2) -// fmt.Printf(">>>> %v == %v? %v\n", i, i2, eq) -// } -// // os.Exit(0) -// } - -// func init() { -// staticRv2iTest() -// } - -// func rv2i(rv reflect.Value) interface{} { -// if _USE_RV_INTERFACE || rv.Kind() == reflect.Interface || rv.CanAddr() { -// return rv.Interface() -// } -// // var i interface{} -// // ui := (*unsafeIntf)(unsafe.Pointer(&i)) -// var ui unsafeIntf -// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) -// // fmt.Printf("urv: flag: %b, typ: %b, ptr: %b\n", urv.flag, uintptr(urv.typ), uintptr(urv.ptr)) -// if (urv.flag&unsafeRvFlagKindMask)&unsafeRvKindDirectIface == 0 { -// if urv.flag&unsafeRvFlagAddr != 0 { -// println("***** indirect and addressable! Needs typed move - delegate to rv.Interface()") -// return rv.Interface() -// } -// println("****** indirect type/kind") -// ui.word = urv.ptr -// } else if urv.flag&unsafeRvFlagIndir != 0 { -// println("****** unsafe rv flag indir") -// ui.word = *(*unsafe.Pointer)(urv.ptr) -// } else { -// println("****** default: assign prt to word directly") -// ui.word = urv.ptr -// } -// // ui.word = urv.ptr -// ui.typ = urv.typ -// // fmt.Printf("(pointers) ui.typ: %p, word: %p\n", ui.typ, ui.word) -// // fmt.Printf("(binary) ui.typ: %b, word: %b\n", uintptr(ui.typ), uintptr(ui.word)) -// return *(*interface{})(unsafe.Pointer(&ui)) -// // return i -// } diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/json.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/json.go deleted file mode 100644 index a731c816..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/json.go +++ /dev/null @@ -1,1491 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// By default, this json support uses base64 encoding for bytes, because you cannot -// store and read any arbitrary string in json (only unicode). -// However, the user can configre how to encode/decode bytes. -// -// This library specifically supports UTF-8 for encoding and decoding only. -// -// Note that the library will happily encode/decode things which are not valid -// json e.g. a map[int64]string. We do it for consistency. With valid json, -// we will encode and decode appropriately. -// Users can specify their map type if necessary to force it. -// -// Note: -// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently. -// We implement it here. - -// Top-level methods of json(End|Dec)Driver (which are implementations of (en|de)cDriver -// MUST not call one-another. 
- -import ( - "bytes" - "encoding/base64" - "math" - "reflect" - "strconv" - "time" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -//-------------------------------- - -var jsonLiterals = [...]byte{ - '"', 't', 'r', 'u', 'e', '"', - '"', 'f', 'a', 'l', 's', 'e', '"', - '"', 'n', 'u', 'l', 'l', '"', -} - -const ( - jsonLitTrueQ = 0 - jsonLitTrue = 1 - jsonLitFalseQ = 6 - jsonLitFalse = 7 - // jsonLitNullQ = 13 - jsonLitNull = 14 -) - -var ( - jsonLiteral4True = jsonLiterals[jsonLitTrue+1 : jsonLitTrue+4] - jsonLiteral4False = jsonLiterals[jsonLitFalse+1 : jsonLitFalse+5] - jsonLiteral4Null = jsonLiterals[jsonLitNull+1 : jsonLitNull+4] -) - -const ( - jsonU4Chk2 = '0' - jsonU4Chk1 = 'a' - 10 - jsonU4Chk0 = 'A' - 10 - - jsonScratchArrayLen = 64 -) - -const ( - // If !jsonValidateSymbols, decoding will be faster, by skipping some checks: - // - If we see first character of null, false or true, - // do not validate subsequent characters. - // - e.g. if we see a n, assume null and skip next 3 characters, - // and do not validate they are ull. - // P.S. Do not expect a significant decoding boost from this. - jsonValidateSymbols = true - - jsonSpacesOrTabsLen = 128 - - jsonAlwaysReturnInternString = false -) - -var ( - // jsonTabs and jsonSpaces are used as caches for indents - jsonTabs, jsonSpaces [jsonSpacesOrTabsLen]byte - - jsonCharHtmlSafeSet bitset256 - jsonCharSafeSet bitset256 - jsonCharWhitespaceSet bitset256 - jsonNumSet bitset256 -) - -func init() { - var i byte - for i = 0; i < jsonSpacesOrTabsLen; i++ { - jsonSpaces[i] = ' ' - jsonTabs[i] = '\t' - } - - // populate the safe values as true: note: ASCII control characters are (0-31) - // jsonCharSafeSet: all true except (0-31) " \ - // jsonCharHtmlSafeSet: all true except (0-31) " \ < > & - for i = 32; i < utf8.RuneSelf; i++ { - switch i { - case '"', '\\': - case '<', '>', '&': - jsonCharSafeSet.set(i) // = true - default: - jsonCharSafeSet.set(i) - jsonCharHtmlSafeSet.set(i) - } - } - for i = 0; i <= utf8.RuneSelf; i++ { - switch i { - case ' ', '\t', '\r', '\n': - jsonCharWhitespaceSet.set(i) - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-': - jsonNumSet.set(i) - } - } -} - -// ---------------- - -type jsonEncDriverTypical struct { - jsonEncDriver -} - -func (e *jsonEncDriverTypical) typical() {} - -func (e *jsonEncDriverTypical) WriteArrayStart(length int) { - e.w.writen1('[') - e.c = containerArrayStart -} - -func (e *jsonEncDriverTypical) WriteArrayElem() { - if e.c != containerArrayStart { - e.w.writen1(',') - } - e.c = containerArrayElem -} - -func (e *jsonEncDriverTypical) WriteArrayEnd() { - e.w.writen1(']') - e.c = containerArrayEnd -} - -func (e *jsonEncDriverTypical) WriteMapStart(length int) { - e.w.writen1('{') - e.c = containerMapStart -} - -func (e *jsonEncDriverTypical) WriteMapElemKey() { - if e.c != containerMapStart { - e.w.writen1(',') - } - e.c = containerMapKey -} - -func (e *jsonEncDriverTypical) WriteMapElemValue() { - e.w.writen1(':') - e.c = containerMapValue -} - -func (e *jsonEncDriverTypical) WriteMapEnd() { - e.w.writen1('}') - e.c = containerMapEnd -} - -func (e *jsonEncDriverTypical) EncodeBool(b bool) { - if b { - e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4]) - } else { - e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5]) - } -} - -func (e *jsonEncDriverTypical) EncodeFloat64(f float64) { - fmt, prec := jsonFloatStrconvFmtPrec(f) - e.w.writeb(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64)) -} - -func (e *jsonEncDriverTypical) EncodeInt(v int64) { - 
e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) -} - -func (e *jsonEncDriverTypical) EncodeUint(v uint64) { - e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) -} - -func (e *jsonEncDriverTypical) EncodeFloat32(f float32) { - e.EncodeFloat64(float64(f)) -} - -// func (e *jsonEncDriverTypical) atEndOfEncode() { -// if e.tw { -// e.w.writen1(' ') -// } -// } - -// ---------------- - -type jsonEncDriverGeneric struct { - jsonEncDriver - // ds string // indent string - di int8 // indent per - d bool // indenting? - dt bool // indent using tabs - dl uint16 // indent level - ks bool // map key as string - is byte // integer as string - _ byte // padding - _ [2]uint64 // padding -} - -// indent is done as below: -// - newline and indent are added before each mapKey or arrayElem -// - newline and indent are added before each ending, -// except there was no entry (so we can have {} or []) - -func (e *jsonEncDriverGeneric) reset() { - e.jsonEncDriver.reset() - e.d, e.dt, e.dl, e.di = false, false, 0, 0 - if e.h.Indent > 0 { - e.d = true - e.di = int8(e.h.Indent) - } else if e.h.Indent < 0 { - e.d = true - e.dt = true - e.di = int8(-e.h.Indent) - } - e.ks = e.h.MapKeyAsString - e.is = e.h.IntegerAsString -} - -func (e *jsonEncDriverGeneric) WriteArrayStart(length int) { - if e.d { - e.dl++ - } - e.w.writen1('[') - e.c = containerArrayStart -} - -func (e *jsonEncDriverGeneric) WriteArrayElem() { - if e.c != containerArrayStart { - e.w.writen1(',') - } - if e.d { - e.writeIndent() - } - e.c = containerArrayElem -} - -func (e *jsonEncDriverGeneric) WriteArrayEnd() { - if e.d { - e.dl-- - if e.c != containerArrayStart { - e.writeIndent() - } - } - e.w.writen1(']') - e.c = containerArrayEnd -} - -func (e *jsonEncDriverGeneric) WriteMapStart(length int) { - if e.d { - e.dl++ - } - e.w.writen1('{') - e.c = containerMapStart -} - -func (e *jsonEncDriverGeneric) WriteMapElemKey() { - if e.c != containerMapStart { - e.w.writen1(',') - } - if e.d { - e.writeIndent() - } - e.c = containerMapKey -} - -func (e *jsonEncDriverGeneric) WriteMapElemValue() { - if e.d { - e.w.writen2(':', ' ') - } else { - e.w.writen1(':') - } - e.c = containerMapValue -} - -func (e *jsonEncDriverGeneric) WriteMapEnd() { - if e.d { - e.dl-- - if e.c != containerMapStart { - e.writeIndent() - } - } - e.w.writen1('}') - e.c = containerMapEnd -} - -func (e *jsonEncDriverGeneric) writeIndent() { - e.w.writen1('\n') - x := int(e.di) * int(e.dl) - if e.dt { - for x > jsonSpacesOrTabsLen { - e.w.writeb(jsonTabs[:]) - x -= jsonSpacesOrTabsLen - } - e.w.writeb(jsonTabs[:x]) - } else { - for x > jsonSpacesOrTabsLen { - e.w.writeb(jsonSpaces[:]) - x -= jsonSpacesOrTabsLen - } - e.w.writeb(jsonSpaces[:x]) - } -} - -func (e *jsonEncDriverGeneric) EncodeBool(b bool) { - if e.ks && e.c == containerMapKey { - if b { - e.w.writeb(jsonLiterals[jsonLitTrueQ : jsonLitTrueQ+6]) - } else { - e.w.writeb(jsonLiterals[jsonLitFalseQ : jsonLitFalseQ+7]) - } - } else { - if b { - e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4]) - } else { - e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5]) - } - } -} - -func (e *jsonEncDriverGeneric) EncodeFloat64(f float64) { - // instead of using 'g', specify whether to use 'e' or 'f' - fmt, prec := jsonFloatStrconvFmtPrec(f) - - var blen int - if e.ks && e.c == containerMapKey { - blen = 2 + len(strconv.AppendFloat(e.b[1:1], f, fmt, prec, 64)) - e.b[0] = '"' - e.b[blen-1] = '"' - } else { - blen = len(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64)) - } - e.w.writeb(e.b[:blen]) -} - -func (e *jsonEncDriverGeneric) 
EncodeInt(v int64) { - x := e.is - if x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) || (e.ks && e.c == containerMapKey) { - blen := 2 + len(strconv.AppendInt(e.b[1:1], v, 10)) - e.b[0] = '"' - e.b[blen-1] = '"' - e.w.writeb(e.b[:blen]) - return - } - e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) -} - -func (e *jsonEncDriverGeneric) EncodeUint(v uint64) { - x := e.is - if x == 'A' || x == 'L' && v > 1<<53 || (e.ks && e.c == containerMapKey) { - blen := 2 + len(strconv.AppendUint(e.b[1:1], v, 10)) - e.b[0] = '"' - e.b[blen-1] = '"' - e.w.writeb(e.b[:blen]) - return - } - e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) -} - -func (e *jsonEncDriverGeneric) EncodeFloat32(f float32) { - // e.encodeFloat(float64(f), 32) - // always encode all floats as IEEE 64-bit floating point. - // It also ensures that we can decode in full precision even if into a float32, - // as what is written is always to float64 precision. - e.EncodeFloat64(float64(f)) -} - -// func (e *jsonEncDriverGeneric) atEndOfEncode() { -// if e.tw { -// if e.d { -// e.w.writen1('\n') -// } else { -// e.w.writen1(' ') -// } -// } -// } - -// -------------------- - -type jsonEncDriver struct { - noBuiltInTypes - e *Encoder - h *JsonHandle - w *encWriterSwitch - se extWrapper - // ---- cpu cache line boundary? - bs []byte // scratch - // ---- cpu cache line boundary? - // scratch: encode time, etc. - // include scratch buffer and padding, but leave space for containerstate - b [jsonScratchArrayLen + 8 + 8 - 1]byte - c containerState - // _ [2]uint64 // padding -} - -func (e *jsonEncDriver) EncodeNil() { - // We always encode nil as just null (never in quotes) - // This allows us to easily decode if a nil in the json stream - // ie if initial token is n. - e.w.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) - - // if e.h.MapKeyAsString && e.c == containerMapKey { - // e.w.writeb(jsonLiterals[jsonLitNullQ : jsonLitNullQ+6]) - // } else { - // e.w.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) - // } -} - -func (e *jsonEncDriver) EncodeTime(t time.Time) { - // Do NOT use MarshalJSON, as it allocates internally. 
- // instead, we call AppendFormat directly, using our scratch buffer (e.b) - if t.IsZero() { - e.EncodeNil() - } else { - e.b[0] = '"' - b := t.AppendFormat(e.b[1:1], time.RFC3339Nano) - e.b[len(b)+1] = '"' - e.w.writeb(e.b[:len(b)+2]) - } - // v, err := t.MarshalJSON(); if err != nil { e.e.error(err) } e.w.writeb(v) -} - -func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { - if v := ext.ConvertExt(rv); v == nil { - e.EncodeNil() - } else { - en.encode(v) - } -} - -func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { - // only encodes re.Value (never re.Data) - if re.Value == nil { - e.EncodeNil() - } else { - en.encode(re.Value) - } -} - -func (e *jsonEncDriver) EncodeString(c charEncoding, v string) { - e.quoteStr(v) -} - -func (e *jsonEncDriver) EncodeStringEnc(c charEncoding, v string) { - e.quoteStr(v) -} - -func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - // if encoding raw bytes and RawBytesExt is configured, use it to encode - if v == nil { - e.EncodeNil() - return - } - if c == cRAW { - if e.se.InterfaceExt != nil { - e.EncodeExt(v, 0, &e.se, e.e) - return - } - - slen := base64.StdEncoding.EncodedLen(len(v)) + 2 - if cap(e.bs) >= slen { - e.bs = e.bs[:slen] - } else { - e.bs = make([]byte, slen) - } - e.bs[0] = '"' - base64.StdEncoding.Encode(e.bs[1:], v) - e.bs[slen-1] = '"' - e.w.writeb(e.bs) - } else { - e.quoteStr(stringView(v)) - } -} - -func (e *jsonEncDriver) EncodeStringBytesRaw(v []byte) { - // if encoding raw bytes and RawBytesExt is configured, use it to encode - if v == nil { - e.EncodeNil() - return - } - if e.se.InterfaceExt != nil { - e.EncodeExt(v, 0, &e.se, e.e) - return - } - - slen := base64.StdEncoding.EncodedLen(len(v)) + 2 - if cap(e.bs) >= slen { - e.bs = e.bs[:slen] - } else { - e.bs = make([]byte, slen) - } - e.bs[0] = '"' - base64.StdEncoding.Encode(e.bs[1:], v) - e.bs[slen-1] = '"' - e.w.writeb(e.bs) -} - -func (e *jsonEncDriver) EncodeAsis(v []byte) { - e.w.writeb(v) -} - -func (e *jsonEncDriver) quoteStr(s string) { - // adapted from std pkg encoding/json - const hex = "0123456789abcdef" - w := e.w - htmlasis := e.h.HTMLCharsAsIs - w.writen1('"') - var start int - for i, slen := 0, len(s); i < slen; { - // encode all bytes < 0x20 (except \r, \n). - // also encode < > & to prevent security holes when served to some browsers. - if b := s[i]; b < utf8.RuneSelf { - // if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - // if (htmlasis && jsonCharSafeSet.isset(b)) || jsonCharHtmlSafeSet.isset(b) { - if jsonCharHtmlSafeSet.isset(b) || (htmlasis && jsonCharSafeSet.isset(b)) { - i++ - continue - } - if start < i { - w.writestr(s[start:i]) - } - switch b { - case '\\', '"': - w.writen2('\\', b) - case '\n': - w.writen2('\\', 'n') - case '\r': - w.writen2('\\', 'r') - case '\b': - w.writen2('\\', 'b') - case '\f': - w.writen2('\\', 'f') - case '\t': - w.writen2('\\', 't') - default: - w.writestr(`\u00`) - w.writen2(hex[b>>4], hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - w.writestr(s[start:i]) - } - w.writestr(`\ufffd`) - i += size - start = i - continue - } - // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR. - // Both technically valid JSON, but bomb on JSONP, so fix here unconditionally. 
- if c == '\u2028' || c == '\u2029' { - if start < i { - w.writestr(s[start:i]) - } - w.writestr(`\u202`) - w.writen1(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - w.writestr(s[start:]) - } - w.writen1('"') -} - -func (e *jsonEncDriver) atEndOfEncode() { - // if e.c == 0 { // scalar written, output space - // e.w.writen1(' ') - // } else if e.h.TermWhitespace { // container written, output new-line - // e.w.writen1('\n') - // } - if e.h.TermWhitespace { - if e.c == 0 { // scalar written, output space - e.w.writen1(' ') - } else { // container written, output new-line - e.w.writen1('\n') - } - } - - // e.c = 0 -} - -type jsonDecDriver struct { - noBuiltInTypes - d *Decoder - h *JsonHandle - r *decReaderSwitch - se extWrapper - - // ---- writable fields during execution --- *try* to keep in sep cache line - - c containerState - // tok is used to store the token read right after skipWhiteSpace. - tok uint8 - fnull bool // found null from appendStringAsBytes - bs []byte // scratch. Initialized from b. Used for parsing strings or numbers. - bstr [8]byte // scratch used for string \UXXX parsing - // ---- cpu cache line boundary? - b [jsonScratchArrayLen]byte // scratch 1, used for parsing strings or numbers or time.Time - b2 [jsonScratchArrayLen]byte // scratch 2, used only for readUntil, decNumBytes - - // _ [3]uint64 // padding - // n jsonNum -} - -// func jsonIsWS(b byte) bool { -// // return b == ' ' || b == '\t' || b == '\r' || b == '\n' -// return jsonCharWhitespaceSet.isset(b) -// } - -func (d *jsonDecDriver) uncacheRead() { - if d.tok != 0 { - d.r.unreadn1() - d.tok = 0 - } -} - -func (d *jsonDecDriver) ReadMapStart() int { - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - const xc uint8 = '{' - if d.tok != xc { - d.d.errorf("read map - expect char '%c' but got char '%c'", xc, d.tok) - } - d.tok = 0 - d.c = containerMapStart - return -1 -} - -func (d *jsonDecDriver) ReadArrayStart() int { - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - const xc uint8 = '[' - if d.tok != xc { - d.d.errorf("read array - expect char '%c' but got char '%c'", xc, d.tok) - } - d.tok = 0 - d.c = containerArrayStart - return -1 -} - -func (d *jsonDecDriver) CheckBreak() bool { - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - return d.tok == '}' || d.tok == ']' -} - -// For the ReadXXX methods below, we could just delegate to helper functions -// readContainerState(c containerState, xc uint8, check bool) -// - ReadArrayElem would become: -// readContainerState(containerArrayElem, ',', d.c != containerArrayStart) -// -// However, until mid-stack inlining comes in go1.11 which supports inlining of -// one-liners, we explicitly write them all 5 out to elide the extra func call. -// -// TODO: For Go 1.11, if inlined, consider consolidating these. 
- -func (d *jsonDecDriver) ReadArrayElem() { - const xc uint8 = ',' - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - if d.c != containerArrayStart { - if d.tok != xc { - d.d.errorf("read array element - expect char '%c' but got char '%c'", xc, d.tok) - } - d.tok = 0 - } - d.c = containerArrayElem -} - -func (d *jsonDecDriver) ReadArrayEnd() { - const xc uint8 = ']' - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - if d.tok != xc { - d.d.errorf("read array end - expect char '%c' but got char '%c'", xc, d.tok) - } - d.tok = 0 - d.c = containerArrayEnd -} - -func (d *jsonDecDriver) ReadMapElemKey() { - const xc uint8 = ',' - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - if d.c != containerMapStart { - if d.tok != xc { - d.d.errorf("read map key - expect char '%c' but got char '%c'", xc, d.tok) - } - d.tok = 0 - } - d.c = containerMapKey -} - -func (d *jsonDecDriver) ReadMapElemValue() { - const xc uint8 = ':' - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - if d.tok != xc { - d.d.errorf("read map value - expect char '%c' but got char '%c'", xc, d.tok) - } - d.tok = 0 - d.c = containerMapValue -} - -func (d *jsonDecDriver) ReadMapEnd() { - const xc uint8 = '}' - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - if d.tok != xc { - d.d.errorf("read map end - expect char '%c' but got char '%c'", xc, d.tok) - } - d.tok = 0 - d.c = containerMapEnd -} - -// func (d *jsonDecDriver) readLit(length, fromIdx uint8) { -// // length here is always less than 8 (literals are: null, true, false) -// bs := d.r.readx(int(length)) -// d.tok = 0 -// if jsonValidateSymbols && !bytes.Equal(bs, jsonLiterals[fromIdx:fromIdx+length]) { -// d.d.errorf("expecting %s: got %s", jsonLiterals[fromIdx:fromIdx+length], bs) -// } -// } - -func (d *jsonDecDriver) readLit4True() { - bs := d.r.readx(3) - d.tok = 0 - if jsonValidateSymbols && !bytes.Equal(bs, jsonLiteral4True) { - d.d.errorf("expecting %s: got %s", jsonLiteral4True, bs) - } -} - -func (d *jsonDecDriver) readLit4False() { - bs := d.r.readx(4) - d.tok = 0 - if jsonValidateSymbols && !bytes.Equal(bs, jsonLiteral4False) { - d.d.errorf("expecting %s: got %s", jsonLiteral4False, bs) - } -} - -func (d *jsonDecDriver) readLit4Null() { - bs := d.r.readx(3) - d.tok = 0 - if jsonValidateSymbols && !bytes.Equal(bs, jsonLiteral4Null) { - d.d.errorf("expecting %s: got %s", jsonLiteral4Null, bs) - } -} - -func (d *jsonDecDriver) TryDecodeAsNil() bool { - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - // we shouldn't try to see if "null" was here, right? 
- // only the plain string: `null` denotes a nil (ie not quotes) - if d.tok == 'n' { - d.readLit4Null() - return true - } - return false -} - -func (d *jsonDecDriver) DecodeBool() (v bool) { - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - fquot := d.c == containerMapKey && d.tok == '"' - if fquot { - d.tok = d.r.readn1() - } - switch d.tok { - case 'f': - d.readLit4False() - // v = false - case 't': - d.readLit4True() - v = true - default: - d.d.errorf("decode bool: got first char %c", d.tok) - // v = false // "unreachable" - } - if fquot { - d.r.readn1() - } - return -} - -func (d *jsonDecDriver) DecodeTime() (t time.Time) { - // read string, and pass the string into json.unmarshal - d.appendStringAsBytes() - if d.fnull { - return - } - t, err := time.Parse(time.RFC3339, stringView(d.bs)) - if err != nil { - d.d.errorv(err) - } - return -} - -func (d *jsonDecDriver) ContainerType() (vt valueType) { - // check container type by checking the first char - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - - // optimize this, so we don't do 4 checks but do one computation. - // return jsonContainerSet[d.tok] - - // ContainerType is mostly called for Map and Array, - // so this conditional is good enough (max 2 checks typically) - if b := d.tok; b == '{' { - return valueTypeMap - } else if b == '[' { - return valueTypeArray - } else if b == 'n' { - return valueTypeNil - } else if b == '"' { - return valueTypeString - } - return valueTypeUnset -} - -func (d *jsonDecDriver) decNumBytes() (bs []byte) { - // stores num bytes in d.bs - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - if d.tok == '"' { - bs = d.r.readUntil(d.b2[:0], '"') - bs = bs[:len(bs)-1] - } else { - d.r.unreadn1() - bs = d.r.readTo(d.bs[:0], &jsonNumSet) - } - d.tok = 0 - return bs -} - -func (d *jsonDecDriver) DecodeUint64() (u uint64) { - bs := d.decNumBytes() - if len(bs) == 0 { - return - } - n, neg, badsyntax, overflow := jsonParseInteger(bs) - if overflow { - d.d.errorf("overflow parsing unsigned integer: %s", bs) - } else if neg { - d.d.errorf("minus found parsing unsigned integer: %s", bs) - } else if badsyntax { - // fallback: try to decode as float, and cast - n = d.decUint64ViaFloat(stringView(bs)) - } - return n -} - -func (d *jsonDecDriver) DecodeInt64() (i int64) { - const cutoff = uint64(1 << uint(64-1)) - bs := d.decNumBytes() - if len(bs) == 0 { - return - } - n, neg, badsyntax, overflow := jsonParseInteger(bs) - if overflow { - d.d.errorf("overflow parsing integer: %s", bs) - } else if badsyntax { - // d.d.errorf("invalid syntax for integer: %s", bs) - // fallback: try to decode as float, and cast - if neg { - n = d.decUint64ViaFloat(stringView(bs[1:])) - } else { - n = d.decUint64ViaFloat(stringView(bs)) - } - } - if neg { - if n > cutoff { - d.d.errorf("overflow parsing integer: %s", bs) - } - i = -(int64(n)) - } else { - if n >= cutoff { - d.d.errorf("overflow parsing integer: %s", bs) - } - i = int64(n) - } - return -} - -func (d *jsonDecDriver) decUint64ViaFloat(s string) (u uint64) { - if len(s) == 0 { - return - } - f, err := strconv.ParseFloat(s, 64) - if err != nil { - d.d.errorf("invalid syntax for integer: %s", s) - // d.d.errorv(err) - } - fi, ff := math.Modf(f) - if ff > 0 { - d.d.errorf("fractional part found parsing integer: %s", s) - } else if fi > float64(math.MaxUint64) { - d.d.errorf("overflow parsing integer: %s", s) - } - return uint64(fi) -} - -func (d *jsonDecDriver) DecodeFloat64() (f float64) { - bs := d.decNumBytes() - if len(bs) == 0 
{ - return - } - f, err := strconv.ParseFloat(stringView(bs), 64) - if err != nil { - d.d.errorv(err) - } - return -} - -func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if ext == nil { - re := rv.(*RawExt) - re.Tag = xtag - d.d.decode(&re.Value) - } else { - var v interface{} - d.d.decode(&v) - ext.UpdateExt(rv, v) - } - return -} - -func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { - // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode. - if d.se.InterfaceExt != nil { - bsOut = bs - d.DecodeExt(&bsOut, 0, &d.se) - return - } - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - // check if an "array" of uint8's (see ContainerType for how to infer if an array) - if d.tok == '[' { - bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) - return - } - d.appendStringAsBytes() - // base64 encodes []byte{} as "", and we encode nil []byte as null. - // Consequently, base64 should decode null as a nil []byte, and "" as an empty []byte{}. - // appendStringAsBytes returns a zero-len slice for both, so as not to reset d.bs. - // However, it sets a fnull field to true, so we can check if a null was found. - if len(d.bs) == 0 { - if d.fnull { - return nil - } - return []byte{} - } - bs0 := d.bs - slen := base64.StdEncoding.DecodedLen(len(bs0)) - if slen <= cap(bs) { - bsOut = bs[:slen] - } else if zerocopy && slen <= cap(d.b2) { - bsOut = d.b2[:slen] - } else { - bsOut = make([]byte, slen) - } - slen2, err := base64.StdEncoding.Decode(bsOut, bs0) - if err != nil { - d.d.errorf("error decoding base64 binary '%s': %v", bs0, err) - return nil - } - if slen != slen2 { - bsOut = bsOut[:slen2] - } - return -} - -func (d *jsonDecDriver) DecodeString() (s string) { - d.appendStringAsBytes() - return d.bsToString() -} - -func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) { - d.appendStringAsBytes() - return d.bs -} - -func (d *jsonDecDriver) appendStringAsBytes() { - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - - d.fnull = false - if d.tok != '"' { - // d.d.errorf("expect char '%c' but got char '%c'", '"', d.tok) - // handle non-string scalar: null, true, false or a number - switch d.tok { - case 'n': - d.readLit4Null() - d.bs = d.bs[:0] - d.fnull = true - case 'f': - d.readLit4False() - d.bs = d.bs[:5] - copy(d.bs, "false") - case 't': - d.readLit4True() - d.bs = d.bs[:4] - copy(d.bs, "true") - default: - // try to parse a valid number - bs := d.decNumBytes() - if len(bs) <= cap(d.bs) { - d.bs = d.bs[:len(bs)] - } else { - d.bs = make([]byte, len(bs)) - } - copy(d.bs, bs) - } - return - } - - d.tok = 0 - r := d.r - var cs = r.readUntil(d.b2[:0], '"') - var cslen = uint(len(cs)) - var c uint8 - v := d.bs[:0] - // append on each byte seen can be expensive, so we just - // keep track of where we last read a contiguous set of - // non-special bytes (using cursor variable), - // and when we see a special byte - // e.g. end-of-slice, " or \, - // we will append the full range into the v slice before proceeding - var i, cursor uint - for { - if i == cslen { - v = append(v, cs[cursor:]...) - cs = r.readUntil(d.b2[:0], '"') - cslen = uint(len(cs)) - i, cursor = 0, 0 - } - c = cs[i] - if c == '"' { - v = append(v, cs[cursor:i]...) - break - } - if c != '\\' { - i++ - continue - } - v = append(v, cs[cursor:i]...) 
- i++ - c = cs[i] - switch c { - case '"', '\\', '/', '\'': - v = append(v, c) - case 'b': - v = append(v, '\b') - case 'f': - v = append(v, '\f') - case 'n': - v = append(v, '\n') - case 'r': - v = append(v, '\r') - case 't': - v = append(v, '\t') - case 'u': - var r rune - var rr uint32 - if cslen < i+4 { - d.d.errorf("need at least 4 more bytes for unicode sequence") - } - var j uint - for _, c = range cs[i+1 : i+5] { // bounds-check-elimination - // best to use explicit if-else - // - not a table, etc which involve memory loads, array lookup with bounds checks, etc - if c >= '0' && c <= '9' { - rr = rr*16 + uint32(c-jsonU4Chk2) - } else if c >= 'a' && c <= 'f' { - rr = rr*16 + uint32(c-jsonU4Chk1) - } else if c >= 'A' && c <= 'F' { - rr = rr*16 + uint32(c-jsonU4Chk0) - } else { - r = unicode.ReplacementChar - i += 4 - goto encode_rune - } - } - r = rune(rr) - i += 4 - if utf16.IsSurrogate(r) { - if len(cs) >= int(i+6) { - var cx = cs[i+1:][:6:6] // [:6] affords bounds-check-elimination - if cx[0] == '\\' && cx[1] == 'u' { - i += 2 - var rr1 uint32 - for j = 2; j < 6; j++ { - c = cx[j] - if c >= '0' && c <= '9' { - rr = rr*16 + uint32(c-jsonU4Chk2) - } else if c >= 'a' && c <= 'f' { - rr = rr*16 + uint32(c-jsonU4Chk1) - } else if c >= 'A' && c <= 'F' { - rr = rr*16 + uint32(c-jsonU4Chk0) - } else { - r = unicode.ReplacementChar - i += 4 - goto encode_rune - } - } - r = utf16.DecodeRune(r, rune(rr1)) - i += 4 - goto encode_rune - } - } - r = unicode.ReplacementChar - } - encode_rune: - w2 := utf8.EncodeRune(d.bstr[:], r) - v = append(v, d.bstr[:w2]...) - default: - d.d.errorf("unsupported escaped value: %c", c) - } - i++ - cursor = i - } - d.bs = v -} - -func (d *jsonDecDriver) nakedNum(z *decNaked, bs []byte) (err error) { - const cutoff = uint64(1 << uint(64-1)) - - var n uint64 - var neg, badsyntax, overflow bool - - if len(bs) == 0 { - if d.h.PreferFloat { - z.v = valueTypeFloat - z.f = 0 - } else if d.h.SignedInteger { - z.v = valueTypeInt - z.i = 0 - } else { - z.v = valueTypeUint - z.u = 0 - } - return - } - if d.h.PreferFloat { - goto F - } - n, neg, badsyntax, overflow = jsonParseInteger(bs) - if badsyntax || overflow { - goto F - } - if neg { - if n > cutoff { - goto F - } - z.v = valueTypeInt - z.i = -(int64(n)) - } else if d.h.SignedInteger { - if n >= cutoff { - goto F - } - z.v = valueTypeInt - z.i = int64(n) - } else { - z.v = valueTypeUint - z.u = n - } - return -F: - z.v = valueTypeFloat - z.f, err = strconv.ParseFloat(stringView(bs), 64) - return -} - -func (d *jsonDecDriver) bsToString() string { - // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key - if jsonAlwaysReturnInternString || d.c == containerMapKey { - return d.d.string(d.bs) - } - return string(d.bs) -} - -func (d *jsonDecDriver) DecodeNaked() { - z := d.d.naked() - // var decodeFurther bool - - if d.tok == 0 { - d.tok = d.r.skip(&jsonCharWhitespaceSet) - } - switch d.tok { - case 'n': - d.readLit4Null() - z.v = valueTypeNil - case 'f': - d.readLit4False() - z.v = valueTypeBool - z.b = false - case 't': - d.readLit4True() - z.v = valueTypeBool - z.b = true - case '{': - z.v = valueTypeMap // don't consume. kInterfaceNaked will call ReadMapStart - case '[': - z.v = valueTypeArray // don't consume. 
kInterfaceNaked will call ReadArrayStart - case '"': - // if a string, and MapKeyAsString, then try to decode it as a nil, bool or number first - d.appendStringAsBytes() - if len(d.bs) > 0 && d.c == containerMapKey && d.h.MapKeyAsString { - switch stringView(d.bs) { - case "null": - z.v = valueTypeNil - case "true": - z.v = valueTypeBool - z.b = true - case "false": - z.v = valueTypeBool - z.b = false - default: - // check if a number: float, int or uint - if err := d.nakedNum(z, d.bs); err != nil { - z.v = valueTypeString - z.s = d.bsToString() - } - } - } else { - z.v = valueTypeString - z.s = d.bsToString() - } - default: // number - bs := d.decNumBytes() - if len(bs) == 0 { - d.d.errorf("decode number from empty string") - return - } - if err := d.nakedNum(z, bs); err != nil { - d.d.errorf("decode number from %s: %v", bs, err) - return - } - } - // if decodeFurther { - // d.s.sc.retryRead() - // } -} - -//---------------------- - -// JsonHandle is a handle for JSON encoding format. -// -// Json is comprehensively supported: -// - decodes numbers into interface{} as int, uint or float64 -// based on how the number looks and some config parameters e.g. PreferFloat, SignedInt, etc. -// - decode integers from float formatted numbers e.g. 1.27e+8 -// - decode any json value (numbers, bool, etc) from quoted strings -// - configurable way to encode/decode []byte . -// by default, encodes and decodes []byte using base64 Std Encoding -// - UTF-8 support for encoding and decoding -// -// It has better performance than the json library in the standard library, -// by leveraging the performance improvements of the codec library. -// -// In addition, it doesn't read more bytes than necessary during a decode, which allows -// reading multiple values from a stream containing json and non-json content. -// For example, a user can read a json value, then a cbor value, then a msgpack value, -// all from the same stream in sequence. -// -// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs are -// not treated as an error. Instead, they are replaced by the Unicode replacement character U+FFFD. -type JsonHandle struct { - textEncodingType - BasicHandle - - // Indent indicates how a value is encoded. - // - If positive, indent by that number of spaces. - // - If negative, indent by that number of tabs. - Indent int8 - - // IntegerAsString controls how integers (signed and unsigned) are encoded. - // - // Per the JSON Spec, JSON numbers are 64-bit floating point numbers. - // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision. - // This can be mitigated by configuring how to encode integers. - // - // IntegerAsString interpretes the following values: - // - if 'L', then encode integers > 2^53 as a json string. - // - if 'A', then encode all integers as a json string - // containing the exact integer representation as a decimal. - // - else encode all integers as a json number (default) - IntegerAsString byte - - // HTMLCharsAsIs controls how to encode some special characters to html: < > & - // - // By default, we encode them as \uXXX - // to prevent security holes when served from some browsers. - HTMLCharsAsIs bool - - // PreferFloat says that we will default to decoding a number as a float. - // If not set, we will examine the characters of the number and decode as an - // integer type if it doesn't have any of the characters [.eE]. 
- PreferFloat bool - - // TermWhitespace says that we add a whitespace character - // at the end of an encoding. - // - // The whitespace is important, especially if using numbers in a context - // where multiple items are written to a stream. - TermWhitespace bool - - // MapKeyAsString says to encode all map keys as strings. - // - // Use this to enforce strict json output. - // The only caveat is that nil value is ALWAYS written as null (never as "null") - MapKeyAsString bool - - // _ [2]byte // padding - - // Note: below, we store hardly-used items e.g. RawBytesExt is cached in the (en|de)cDriver. - - // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. - // If not configured, raw bytes are encoded to/from base64 text. - RawBytesExt InterfaceExt - - _ [2]uint64 // padding -} - -// Name returns the name of the handle: json -func (h *JsonHandle) Name() string { return "json" } -func (h *JsonHandle) hasElemSeparators() bool { return true } -func (h *JsonHandle) typical() bool { - return h.Indent == 0 && !h.MapKeyAsString && h.IntegerAsString != 'A' && h.IntegerAsString != 'L' -} - -type jsonTypical interface { - typical() -} - -func (h *JsonHandle) recreateEncDriver(ed encDriver) (v bool) { - _, v = ed.(jsonTypical) - return v != h.typical() -} - -// SetInterfaceExt sets an extension -func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { - return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext}) -} - -func (h *JsonHandle) newEncDriver(e *Encoder) (ee encDriver) { - var hd *jsonEncDriver - if h.typical() { - var v jsonEncDriverTypical - ee = &v - hd = &v.jsonEncDriver - } else { - var v jsonEncDriverGeneric - ee = &v - hd = &v.jsonEncDriver - } - hd.e, hd.h, hd.bs = e, h, hd.b[:0] - hd.se.BytesExt = bytesExtFailer{} - ee.reset() - return -} - -func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { - // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} - hd := jsonDecDriver{d: d, h: h} - hd.se.BytesExt = bytesExtFailer{} - hd.bs = hd.b[:0] - hd.reset() - return &hd -} - -func (e *jsonEncDriver) reset() { - e.w = e.e.w - e.se.InterfaceExt = e.h.RawBytesExt - if e.bs != nil { - e.bs = e.bs[:0] - } - e.c = 0 -} - -func (d *jsonDecDriver) reset() { - d.r = d.d.r - d.se.InterfaceExt = d.h.RawBytesExt - if d.bs != nil { - d.bs = d.bs[:0] - } - d.c, d.tok = 0, 0 - // d.n.reset() -} - -func jsonFloatStrconvFmtPrec(f float64) (fmt byte, prec int) { - prec = -1 - var abs = math.Abs(f) - if abs != 0 && (abs < 1e-6 || abs >= 1e21) { - fmt = 'e' - } else { - fmt = 'f' - // set prec to 1 iff mod is 0. - // better than using jsonIsFloatBytesB2 to check if a . or E in the float bytes. - // this ensures that every float has an e or .0 in it. - if abs <= 1 { - if abs == 0 || abs == 1 { - prec = 1 - } - } else if _, mod := math.Modf(abs); mod == 0 { - prec = 1 - } - } - return -} - -// custom-fitted version of strconv.Parse(Ui|I)nt. -// Also ensures we don't have to search for .eE to determine if a float or not. -// Note: s CANNOT be a zero-length slice. 
-func jsonParseInteger(s []byte) (n uint64, neg, badSyntax, overflow bool) { - const maxUint64 = (1<<64 - 1) - const cutoff = maxUint64/10 + 1 - - if len(s) == 0 { // bounds-check-elimination - // treat empty string as zero value - // badSyntax = true - return - } - switch s[0] { - case '+': - s = s[1:] - case '-': - s = s[1:] - neg = true - } - for _, c := range s { - if c < '0' || c > '9' { - badSyntax = true - return - } - // unsigned integers don't overflow well on multiplication, so check cutoff here - // e.g. (maxUint64-5)*10 doesn't overflow well ... - if n >= cutoff { - overflow = true - return - } - n *= 10 - n1 := n + uint64(c-'0') - if n1 < n || n1 > maxUint64 { - overflow = true - return - } - n = n1 - } - return -} - -var _ decDriver = (*jsonDecDriver)(nil) -var _ encDriver = (*jsonEncDriverGeneric)(nil) -var _ encDriver = (*jsonEncDriverTypical)(nil) -var _ jsonTypical = (*jsonEncDriverTypical)(nil) diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/mammoth-test.go.tmpl b/src/vendor/github.com/hashicorp/go-msgpack/codec/mammoth-test.go.tmpl deleted file mode 100644 index c598cc73..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/mammoth-test.go.tmpl +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// Code generated from mammoth-test.go.tmpl - DO NOT EDIT. - -package codec - -import "testing" -import "fmt" -import "reflect" - -// TestMammoth has all the different paths optimized in fast-path -// It has all the primitives, slices and maps. -// -// For each of those types, it has a pointer and a non-pointer field. - -func init() { _ = fmt.Printf } // so we can include fmt as needed - -type TestMammoth struct { - -{{range .Values }}{{if .Primitive }}{{/* -*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }} -{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }} -{{end}}{{end}} - -{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* -*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }} -{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }} -{{end}}{{end}}{{end}} - -{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* -*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }} -{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }} -{{end}}{{end}}{{end}} - -} - -{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* -*/}} type {{ .MethodNamePfx "typMbs" false }} []{{ .Elem }} -func (_ {{ .MethodNamePfx "typMbs" false }}) MapBySlice() { } -{{end}}{{end}}{{end}} - -{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* -*/}} type {{ .MethodNamePfx "typMap" false }} map[{{ .MapKey }}]{{ .Elem }} -{{end}}{{end}}{{end}} - -func doTestMammothSlices(t *testing.T, h Handle) { -{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* -*/}} - var v{{$i}}va [8]{{ .Elem }} - for _, v := range [][]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .Elem }}, {{ zerocmd .Elem }}, {{ zerocmd .Elem }}, {{ nonzerocmd .Elem }} } } { {{/* - // fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v) - // - encode value to some []byte - // - decode into a length-wise-equal []byte - // - check if equal to initial slice - // - encode ptr to the value - // - check if encode bytes are same - // - decode into ptrs to: nil, then 1-elem slice, equal-length, then large len slice - // - decode into non-addressable slice of equal length, then larger len - // - for each decode, compare elem-by-elem to the original 
slice - // - - // - rinse and repeat for a MapBySlice version - // - - */}} - var v{{$i}}v1, v{{$i}}v2 []{{ .Elem }} - v{{$i}}v1 = v - bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}") - if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } - testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}") - if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } - testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-noaddr") - // ... - bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p") - v{{$i}}v2 = nil - testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p") - v{{$i}}va = [8]{{ .Elem }}{} // clear the array - v{{$i}}v2 = v{{$i}}va[:1:1] - testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-1") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-1") - v{{$i}}va = [8]{{ .Elem }}{} // clear the array - v{{$i}}v2 = v{{$i}}va[:len(v{{$i}}v1):len(v{{$i}}v1)] - testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-len") - v{{$i}}va = [8]{{ .Elem }}{} // clear the array - v{{$i}}v2 = v{{$i}}va[:] - testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-cap") - if len(v{{$i}}v1) > 1 { - v{{$i}}va = [8]{{ .Elem }}{} // clear the array - testUnmarshalErr((&v{{$i}}va)[:len(v{{$i}}v1)], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len-noaddr") - testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-len-noaddr") - v{{$i}}va = [8]{{ .Elem }}{} // clear the array - testUnmarshalErr((&v{{$i}}va)[:], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap-noaddr") - testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-cap-noaddr") - } - // ... 
- var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMbs" false }} - v{{$i}}v2 = nil - if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } - v{{$i}}v3 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v1) - v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2) - bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom") - testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom") - testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom") - bs{{$i}} = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p") - v{{$i}}v2 = nil - v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2) - testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p") - testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom-p") - } -{{end}}{{end}}{{end}} -} - -func doTestMammothMaps(t *testing.T, h Handle) { -{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* -*/}} - for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .MapKey }}:{{ zerocmd .Elem }} {{if ne "bool" .MapKey}}, {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} {{end}} } } { - // fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v) - var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }} - v{{$i}}v1 = v - bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}") - if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map - testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}") - if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map - testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-map-v{{$i}}-noaddr") // decode into non-addressable map value - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-noaddr") - if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map - testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-len") - bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p") - v{{$i}}v2 = nil - testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-nil") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-nil") - // ... - if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map - var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMap" false }} - v{{$i}}v3 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v1) - v{{$i}}v4 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v2) - bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-map-v{{$i}}-custom") - testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len") - testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-map-v{{$i}}-p-len") - } -{{end}}{{end}}{{end}} - -} - -func doTestMammothMapsAndSlices(t *testing.T, h Handle) { - doTestMammothSlices(t, h) - doTestMammothMaps(t, h) -} diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/mammoth2-test.go.tmpl b/src/vendor/github.com/hashicorp/go-msgpack/codec/mammoth2-test.go.tmpl deleted file mode 100644 index 71eaf618..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/mammoth2-test.go.tmpl +++ /dev/null @@ -1,94 +0,0 @@ -// +build !notfastpath - -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. 
- -// Code generated from mammoth2-test.go.tmpl - DO NOT EDIT. - -package codec - -// Increase codecoverage by covering all the codecgen paths, in fast-path and gen-helper.go.... -// -// Add: -// - test file for creating a mammoth generated file as _mammoth_generated.go -// - generate a second mammoth files in a different file: mammoth2_generated_test.go -// - mammoth-test.go.tmpl will do this -// - run codecgen on it, into mammoth2_codecgen_generated_test.go (no build tags) -// - as part of TestMammoth, run it also -// - this will cover all the codecgen, gen-helper, etc in one full run -// - check in mammoth* files into github also -// - then -// -// Now, add some types: -// - some that implement BinaryMarshal, TextMarshal, JSONMarshal, and one that implements none of it -// - create a wrapper type that includes TestMammoth2, with it in slices, and maps, and the custom types -// - this wrapper object is what we work encode/decode (so that the codecgen methods are called) - - -// import "encoding/binary" -import "fmt" - -type TestMammoth2 struct { - -{{range .Values }}{{if .Primitive }}{{/* -*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }} -{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }} -{{end}}{{end}} - -{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* -*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }} -{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }} -{{end}}{{end}}{{end}} - -{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* -*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }} -{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }} -{{end}}{{end}}{{end}} - -} - -// ----------- - -type testMammoth2Binary uint64 -func (x testMammoth2Binary) MarshalBinary() (data []byte, err error) { -data = make([]byte, 8) -bigen.PutUint64(data, uint64(x)) -return -} -func (x *testMammoth2Binary) UnmarshalBinary(data []byte) (err error) { -*x = testMammoth2Binary(bigen.Uint64(data)) -return -} - -type testMammoth2Text uint64 -func (x testMammoth2Text) MarshalText() (data []byte, err error) { -data = []byte(fmt.Sprintf("%b", uint64(x))) -return -} -func (x *testMammoth2Text) UnmarshalText(data []byte) (err error) { -_, err = fmt.Sscanf(string(data), "%b", (*uint64)(x)) -return -} - -type testMammoth2Json uint64 -func (x testMammoth2Json) MarshalJSON() (data []byte, err error) { -data = []byte(fmt.Sprintf("%v", uint64(x))) -return -} -func (x *testMammoth2Json) UnmarshalJSON(data []byte) (err error) { -_, err = fmt.Sscanf(string(data), "%v", (*uint64)(x)) -return -} - -type testMammoth2Basic [4]uint64 - -type TestMammoth2Wrapper struct { - V TestMammoth2 - T testMammoth2Text - B testMammoth2Binary - J testMammoth2Json - C testMammoth2Basic - M map[testMammoth2Basic]TestMammoth2 - L []TestMammoth2 - A [4]int64 -} diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go index bf311a67..da0500d1 100644 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go +++ b/src/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go @@ -1,5 +1,5 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. 
/* MSGPACK @@ -15,8 +15,8 @@ For compatibility with behaviour of msgpack-c reference implementation: - Go intX (<0) IS ENCODED AS msgpack -ve fixnum, signed -*/ +*/ package codec import ( @@ -24,144 +24,59 @@ import ( "io" "math" "net/rpc" - "reflect" - "time" ) const ( mpPosFixNumMin byte = 0x00 - mpPosFixNumMax byte = 0x7f - mpFixMapMin byte = 0x80 - mpFixMapMax byte = 0x8f - mpFixArrayMin byte = 0x90 - mpFixArrayMax byte = 0x9f - mpFixStrMin byte = 0xa0 - mpFixStrMax byte = 0xbf - mpNil byte = 0xc0 - _ byte = 0xc1 - mpFalse byte = 0xc2 - mpTrue byte = 0xc3 - mpFloat byte = 0xca - mpDouble byte = 0xcb - mpUint8 byte = 0xcc - mpUint16 byte = 0xcd - mpUint32 byte = 0xce - mpUint64 byte = 0xcf - mpInt8 byte = 0xd0 - mpInt16 byte = 0xd1 - mpInt32 byte = 0xd2 - mpInt64 byte = 0xd3 + mpPosFixNumMax = 0x7f + mpFixMapMin = 0x80 + mpFixMapMax = 0x8f + mpFixArrayMin = 0x90 + mpFixArrayMax = 0x9f + mpFixStrMin = 0xa0 + mpFixStrMax = 0xbf + mpNil = 0xc0 + _ = 0xc1 + mpFalse = 0xc2 + mpTrue = 0xc3 + mpFloat = 0xca + mpDouble = 0xcb + mpUint8 = 0xcc + mpUint16 = 0xcd + mpUint32 = 0xce + mpUint64 = 0xcf + mpInt8 = 0xd0 + mpInt16 = 0xd1 + mpInt32 = 0xd2 + mpInt64 = 0xd3 // extensions below - mpBin8 byte = 0xc4 - mpBin16 byte = 0xc5 - mpBin32 byte = 0xc6 - mpExt8 byte = 0xc7 - mpExt16 byte = 0xc8 - mpExt32 byte = 0xc9 - mpFixExt1 byte = 0xd4 - mpFixExt2 byte = 0xd5 - mpFixExt4 byte = 0xd6 - mpFixExt8 byte = 0xd7 - mpFixExt16 byte = 0xd8 - - mpStr8 byte = 0xd9 // new - mpStr16 byte = 0xda - mpStr32 byte = 0xdb - - mpArray16 byte = 0xdc - mpArray32 byte = 0xdd - - mpMap16 byte = 0xde - mpMap32 byte = 0xdf - - mpNegFixNumMin byte = 0xe0 - mpNegFixNumMax byte = 0xff + mpBin8 = 0xc4 + mpBin16 = 0xc5 + mpBin32 = 0xc6 + mpExt8 = 0xc7 + mpExt16 = 0xc8 + mpExt32 = 0xc9 + mpFixExt1 = 0xd4 + mpFixExt2 = 0xd5 + mpFixExt4 = 0xd6 + mpFixExt8 = 0xd7 + mpFixExt16 = 0xd8 + + mpStr8 = 0xd9 // new + mpStr16 = 0xda + mpStr32 = 0xdb + + mpArray16 = 0xdc + mpArray32 = 0xdd + + mpMap16 = 0xde + mpMap32 = 0xdf + + mpNegFixNumMin = 0xe0 + mpNegFixNumMax = 0xff ) -var mpTimeExtTag int8 = -1 -var mpTimeExtTagU = uint8(mpTimeExtTag) - -// var mpdesc = map[byte]string{ -// mpPosFixNumMin: "PosFixNumMin", -// mpPosFixNumMax: "PosFixNumMax", -// mpFixMapMin: "FixMapMin", -// mpFixMapMax: "FixMapMax", -// mpFixArrayMin: "FixArrayMin", -// mpFixArrayMax: "FixArrayMax", -// mpFixStrMin: "FixStrMin", -// mpFixStrMax: "FixStrMax", -// mpNil: "Nil", -// mpFalse: "False", -// mpTrue: "True", -// mpFloat: "Float", -// mpDouble: "Double", -// mpUint8: "Uint8", -// mpUint16: "Uint16", -// mpUint32: "Uint32", -// mpUint64: "Uint64", -// mpInt8: "Int8", -// mpInt16: "Int16", -// mpInt32: "Int32", -// mpInt64: "Int64", -// mpBin8: "Bin8", -// mpBin16: "Bin16", -// mpBin32: "Bin32", -// mpExt8: "Ext8", -// mpExt16: "Ext16", -// mpExt32: "Ext32", -// mpFixExt1: "FixExt1", -// mpFixExt2: "FixExt2", -// mpFixExt4: "FixExt4", -// mpFixExt8: "FixExt8", -// mpFixExt16: "FixExt16", -// mpStr8: "Str8", -// mpStr16: "Str16", -// mpStr32: "Str32", -// mpArray16: "Array16", -// mpArray32: "Array32", -// mpMap16: "Map16", -// mpMap32: "Map32", -// mpNegFixNumMin: "NegFixNumMin", -// mpNegFixNumMax: "NegFixNumMax", -// } - -func mpdesc(bd byte) string { - switch bd { - case mpNil: - return "nil" - case mpFalse: - return "false" - case mpTrue: - return "true" - case mpFloat, mpDouble: - return "float" - case mpUint8, mpUint16, mpUint32, mpUint64: - return "uint" - case mpInt8, mpInt16, mpInt32, mpInt64: - return "int" - default: - switch { - case bd >= 
mpPosFixNumMin && bd <= mpPosFixNumMax: - return "int" - case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: - return "int" - case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: - return "string|bytes" - case bd == mpBin8, bd == mpBin16, bd == mpBin32: - return "bytes" - case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: - return "array" - case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: - return "map" - case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: - return "ext" - default: - return "unknown" - } - } -} - // MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec // that the backend RPC service takes multiple arguments, which have been arranged // in sequence in the slice. @@ -172,102 +87,76 @@ type MsgpackSpecRpcMultiArgs []interface{} // A MsgpackContainer type specifies the different types of msgpackContainers. type msgpackContainerType struct { - fixCutoff uint8 - bFixMin, b8, b16, b32 byte - // hasFixMin, has8, has8Always bool + fixCutoff int + bFixMin, b8, b16, b32 byte + hasFixMin, has8, has8Always bool } var ( - msgpackContainerRawLegacy = msgpackContainerType{ - 32, mpFixStrMin, 0, mpStr16, mpStr32, - } - msgpackContainerStr = msgpackContainerType{ - 32, mpFixStrMin, mpStr8, mpStr16, mpStr32, // true, true, false, - } - msgpackContainerBin = msgpackContainerType{ - 0, 0, mpBin8, mpBin16, mpBin32, // false, true, true, - } - msgpackContainerList = msgpackContainerType{ - 16, mpFixArrayMin, 0, mpArray16, mpArray32, // true, false, false, - } - msgpackContainerMap = msgpackContainerType{ - 16, mpFixMapMin, 0, mpMap16, mpMap32, // true, false, false, - } + msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} + msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} + msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} + msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} ) //--------------------------------------------- type msgpackEncDriver struct { - noBuiltInTypes - encDriverNoopContainerWriter - // encNoSeparator - e *Encoder - w *encWriterSwitch + w encWriter h *MsgpackHandle - x [8]byte - // _ [3]uint64 // padding } -func (e *msgpackEncDriver) EncodeNil() { +func (e *msgpackEncDriver) isBuiltinType(rt uintptr) bool { + //no builtin types. All encodings are based on kinds. Types supported as extensions. 
+ return false +} + +func (e *msgpackEncDriver) encodeBuiltin(rt uintptr, v interface{}) {} + +func (e *msgpackEncDriver) encodeNil() { e.w.writen1(mpNil) } -func (e *msgpackEncDriver) EncodeInt(i int64) { - if e.h.PositiveIntUnsigned && i >= 0 { - e.EncodeUint(uint64(i)) - } else if i > math.MaxInt8 { - if i <= math.MaxInt16 { - e.w.writen1(mpInt16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) - } else if i <= math.MaxInt32 { - e.w.writen1(mpInt32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) - } else { - e.w.writen1(mpInt64) - bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) - } - } else if i >= -32 { - if e.h.NoFixedNum { - e.w.writen2(mpInt8, byte(i)) - } else { - e.w.writen1(byte(i)) - } - } else if i >= math.MinInt8 { +func (e *msgpackEncDriver) encodeInt(i int64) { + + switch { + case i >= 0: + e.encodeUint(uint64(i)) + case i >= -32: + e.w.writen1(byte(i)) + case i >= math.MinInt8: e.w.writen2(mpInt8, byte(i)) - } else if i >= math.MinInt16 { + case i >= math.MinInt16: e.w.writen1(mpInt16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) - } else if i >= math.MinInt32 { + e.w.writeUint16(uint16(i)) + case i >= math.MinInt32: e.w.writen1(mpInt32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) - } else { + e.w.writeUint32(uint32(i)) + default: e.w.writen1(mpInt64) - bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + e.w.writeUint64(uint64(i)) } } -func (e *msgpackEncDriver) EncodeUint(i uint64) { - if i <= math.MaxInt8 { - if e.h.NoFixedNum { - e.w.writen2(mpUint8, byte(i)) - } else { - e.w.writen1(byte(i)) - } - } else if i <= math.MaxUint8 { +func (e *msgpackEncDriver) encodeUint(i uint64) { + switch { + case i <= math.MaxInt8: + e.w.writen1(byte(i)) + case i <= math.MaxUint8: e.w.writen2(mpUint8, byte(i)) - } else if i <= math.MaxUint16 { + case i <= math.MaxUint16: e.w.writen1(mpUint16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) - } else if i <= math.MaxUint32 { + e.w.writeUint16(uint16(i)) + case i <= math.MaxUint32: e.w.writen1(mpUint32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) - } else { + e.w.writeUint32(uint32(i)) + default: e.w.writen1(mpUint64) - bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + e.w.writeUint64(uint64(i)) } } -func (e *msgpackEncDriver) EncodeBool(b bool) { +func (e *msgpackEncDriver) encodeBool(b bool) { if b { e.w.writen1(mpTrue) } else { @@ -275,318 +164,229 @@ func (e *msgpackEncDriver) EncodeBool(b bool) { } } -func (e *msgpackEncDriver) EncodeFloat32(f float32) { +func (e *msgpackEncDriver) encodeFloat32(f float32) { e.w.writen1(mpFloat) - bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) + e.w.writeUint32(math.Float32bits(f)) } -func (e *msgpackEncDriver) EncodeFloat64(f float64) { +func (e *msgpackEncDriver) encodeFloat64(f float64) { e.w.writen1(mpDouble) - bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) -} - -func (e *msgpackEncDriver) EncodeTime(t time.Time) { - if t.IsZero() { - e.EncodeNil() - return - } - t = t.UTC() - sec, nsec := t.Unix(), uint64(t.Nanosecond()) - var data64 uint64 - var l = 4 - if sec >= 0 && sec>>34 == 0 { - data64 = (nsec << 34) | uint64(sec) - if data64&0xffffffff00000000 != 0 { - l = 8 - } - } else { - l = 12 - } - if e.h.WriteExt { - e.encodeExtPreamble(mpTimeExtTagU, l) - } else { - e.writeContainerLen(msgpackContainerRawLegacy, l) - } - switch l { - case 4: - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(data64)) - case 8: - bigenHelper{e.x[:8], e.w}.writeUint64(data64) - case 12: - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(nsec)) - bigenHelper{e.x[:8], 
e.w}.writeUint64(uint64(sec)) - } -} - -func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) { - bs := ext.WriteExt(v) - if bs == nil { - e.EncodeNil() - return - } - if e.h.WriteExt { - e.encodeExtPreamble(uint8(xtag), len(bs)) - e.w.writeb(bs) - } else { - e.EncodeStringBytesRaw(bs) - } -} - -func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { - e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) - e.w.writeb(re.Data) + e.w.writeUint64(math.Float64bits(f)) } func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { - if l == 1 { + switch { + case l == 1: e.w.writen2(mpFixExt1, xtag) - } else if l == 2 { + case l == 2: e.w.writen2(mpFixExt2, xtag) - } else if l == 4 { + case l == 4: e.w.writen2(mpFixExt4, xtag) - } else if l == 8 { + case l == 8: e.w.writen2(mpFixExt8, xtag) - } else if l == 16 { + case l == 16: e.w.writen2(mpFixExt16, xtag) - } else if l < 256 { + case l < 256: e.w.writen2(mpExt8, byte(l)) e.w.writen1(xtag) - } else if l < 65536 { + case l < 65536: e.w.writen1(mpExt16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + e.w.writeUint16(uint16(l)) e.w.writen1(xtag) - } else { + default: e.w.writen1(mpExt32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + e.w.writeUint32(uint32(l)) e.w.writen1(xtag) } } -func (e *msgpackEncDriver) WriteArrayStart(length int) { +func (e *msgpackEncDriver) encodeArrayPreamble(length int) { e.writeContainerLen(msgpackContainerList, length) } -func (e *msgpackEncDriver) WriteMapStart(length int) { +func (e *msgpackEncDriver) encodeMapPreamble(length int) { e.writeContainerLen(msgpackContainerMap, length) } -func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) { - slen := len(s) - if c == cRAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, slen) +func (e *msgpackEncDriver) encodeString(c charEncoding, s string) { + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(s)) } else { - e.writeContainerLen(msgpackContainerRawLegacy, slen) + e.writeContainerLen(msgpackContainerStr, len(s)) } - if slen > 0 { + if len(s) > 0 { e.w.writestr(s) } } -func (e *msgpackEncDriver) EncodeStringEnc(c charEncoding, s string) { - slen := len(s) - if e.h.WriteExt { - e.writeContainerLen(msgpackContainerStr, slen) - } else { - e.writeContainerLen(msgpackContainerRawLegacy, slen) - } - if slen > 0 { - e.w.writestr(s) - } +func (e *msgpackEncDriver) encodeSymbol(v string) { + e.encodeString(c_UTF8, v) } -func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) { - if bs == nil { - e.EncodeNil() - return - } - slen := len(bs) - if c == cRAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, slen) +func (e *msgpackEncDriver) encodeStringBytes(c charEncoding, bs []byte) { + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(bs)) } else { - e.writeContainerLen(msgpackContainerRawLegacy, slen) + e.writeContainerLen(msgpackContainerStr, len(bs)) } - if slen > 0 { - e.w.writeb(bs) - } -} - -func (e *msgpackEncDriver) EncodeStringBytesRaw(bs []byte) { - if bs == nil { - e.EncodeNil() - return - } - slen := len(bs) - if e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, slen) - } else { - e.writeContainerLen(msgpackContainerRawLegacy, slen) - } - if slen > 0 { + if len(bs) > 0 { e.w.writeb(bs) } } func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { - if ct.fixCutoff > 0 && l < int(ct.fixCutoff) { + switch { + case ct.hasFixMin && l < ct.fixCutoff: e.w.writen1(ct.bFixMin | byte(l)) - 
} else if ct.b8 > 0 && l < 256 { + case ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt): e.w.writen2(ct.b8, uint8(l)) - } else if l < 65536 { + case l < 65536: e.w.writen1(ct.b16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) - } else { + e.w.writeUint16(uint16(l)) + default: e.w.writen1(ct.b32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + e.w.writeUint32(uint32(l)) } } //--------------------------------------------- type msgpackDecDriver struct { - d *Decoder - r *decReaderSwitch - h *MsgpackHandle - // b [scratchByteArrayLen]byte + r decReader + h *MsgpackHandle bd byte bdRead bool - br bool // bytes reader - noBuiltInTypes - // noStreamingCodec - // decNoSeparator - decDriverNoopContainerReader - // _ [3]uint64 // padding + bdType valueType } +func (d *msgpackDecDriver) isBuiltinType(rt uintptr) bool { + //no builtin types. All encodings are based on kinds. Types supported as extensions. + return false +} + +func (d *msgpackDecDriver) decodeBuiltin(rt uintptr, v interface{}) {} + // Note: This returns either a primitive (int, bool, etc) for non-containers, // or a containerType, or a specific type denoting nil or extension. // It is called when a nil interface{} is passed, leaving it up to the DecDriver // to introspect the stream and decide how best to decode. // It deciphers the value by looking at the stream first. -func (d *msgpackDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } +func (d *msgpackDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { + d.initReadNext() bd := d.bd - n := d.d.naked() - var decodeFurther bool switch bd { case mpNil: - n.v = valueTypeNil + vt = valueTypeNil d.bdRead = false case mpFalse: - n.v = valueTypeBool - n.b = false + vt = valueTypeBool + v = false case mpTrue: - n.v = valueTypeBool - n.b = true + vt = valueTypeBool + v = true case mpFloat: - n.v = valueTypeFloat - n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + vt = valueTypeFloat + v = float64(math.Float32frombits(d.r.readUint32())) case mpDouble: - n.v = valueTypeFloat - n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + vt = valueTypeFloat + v = math.Float64frombits(d.r.readUint64()) case mpUint8: - n.v = valueTypeUint - n.u = uint64(d.r.readn1()) + vt = valueTypeUint + v = uint64(d.r.readn1()) case mpUint16: - n.v = valueTypeUint - n.u = uint64(bigen.Uint16(d.r.readx(2))) + vt = valueTypeUint + v = uint64(d.r.readUint16()) case mpUint32: - n.v = valueTypeUint - n.u = uint64(bigen.Uint32(d.r.readx(4))) + vt = valueTypeUint + v = uint64(d.r.readUint32()) case mpUint64: - n.v = valueTypeUint - n.u = uint64(bigen.Uint64(d.r.readx(8))) + vt = valueTypeUint + v = uint64(d.r.readUint64()) case mpInt8: - n.v = valueTypeInt - n.i = int64(int8(d.r.readn1())) + vt = valueTypeInt + v = int64(int8(d.r.readn1())) case mpInt16: - n.v = valueTypeInt - n.i = int64(int16(bigen.Uint16(d.r.readx(2)))) + vt = valueTypeInt + v = int64(int16(d.r.readUint16())) case mpInt32: - n.v = valueTypeInt - n.i = int64(int32(bigen.Uint32(d.r.readx(4)))) + vt = valueTypeInt + v = int64(int32(d.r.readUint32())) case mpInt64: - n.v = valueTypeInt - n.i = int64(int64(bigen.Uint64(d.r.readx(8)))) + vt = valueTypeInt + v = int64(int64(d.r.readUint64())) default: switch { case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: // positive fixnum (always signed) - n.v = valueTypeInt - n.i = int64(int8(bd)) + vt = valueTypeInt + v = int64(int8(bd)) case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: // negative fixnum - n.v = valueTypeInt - n.i = 
int64(int8(bd)) + vt = valueTypeInt + v = int64(int8(bd)) case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: - if d.h.WriteExt || d.h.RawToString { - n.v = valueTypeString - n.s = d.DecodeString() + if d.h.RawToString { + var rvm string + vt = valueTypeString + v = &rvm } else { - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false) + var rvm = []byte{} + vt = valueTypeBytes + v = &rvm } + decodeFurther = true case bd == mpBin8, bd == mpBin16, bd == mpBin32: - decNakedReadRawBytes(d, d.d, n, d.h.RawToString) + var rvm = []byte{} + vt = valueTypeBytes + v = &rvm + decodeFurther = true case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: - n.v = valueTypeArray + vt = valueTypeArray decodeFurther = true case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: - n.v = valueTypeMap + vt = valueTypeMap decodeFurther = true case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: - n.v = valueTypeExt clen := d.readExtLen() - n.u = uint64(d.r.readn1()) - if n.u == uint64(mpTimeExtTagU) { - n.v = valueTypeTime - n.t = d.decodeTime(clen) - } else if d.br { - n.l = d.r.readx(uint(clen)) - } else { - n.l = decByteSlice(d.r, clen, d.d.h.MaxInitLen, d.d.b[:]) - } + var re RawExt + re.Tag = d.r.readn1() + re.Data = d.r.readn(clen) + v = &re + vt = valueTypeExt default: - d.d.errorf("cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd)) + decErr("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) } } if !decodeFurther { d.bdRead = false } - if n.v == valueTypeUint && d.h.SignedInteger { - n.v = valueTypeInt - n.i = int64(n.u) - } + return } // int can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) DecodeInt64() (i int64) { - if !d.bdRead { - d.readNextBd() - } +func (d *msgpackDecDriver) decodeInt(bitsize uint8) (i int64) { switch d.bd { case mpUint8: i = int64(uint64(d.r.readn1())) case mpUint16: - i = int64(uint64(bigen.Uint16(d.r.readx(2)))) + i = int64(uint64(d.r.readUint16())) case mpUint32: - i = int64(uint64(bigen.Uint32(d.r.readx(4)))) + i = int64(uint64(d.r.readUint32())) case mpUint64: - i = int64(bigen.Uint64(d.r.readx(8))) + i = int64(d.r.readUint64()) case mpInt8: i = int64(int8(d.r.readn1())) case mpInt16: - i = int64(int16(bigen.Uint16(d.r.readx(2)))) + i = int64(int16(d.r.readUint16())) case mpInt32: - i = int64(int32(bigen.Uint32(d.r.readx(4)))) + i = int64(int32(d.r.readUint32())) case mpInt64: - i = int64(bigen.Uint64(d.r.readx(8))) + i = int64(d.r.readUint64()) default: switch { case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: @@ -594,8 +394,13 @@ func (d *msgpackDecDriver) DecodeInt64() (i int64) { case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: i = int64(int8(d.bd)) default: - d.d.errorf("cannot decode signed integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) - return + decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { + decErr("Overflow int value: %v", i) } } d.bdRead = false @@ -603,57 +408,54 @@ func (d *msgpackDecDriver) DecodeInt64() (i int64) { } // uint can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) DecodeUint64() (ui uint64) { - if !d.bdRead { - d.readNextBd() - } +func (d *msgpackDecDriver) decodeUint(bitsize uint8) (ui uint64) { switch d.bd { case mpUint8: ui 
= uint64(d.r.readn1()) case mpUint16: - ui = uint64(bigen.Uint16(d.r.readx(2))) + ui = uint64(d.r.readUint16()) case mpUint32: - ui = uint64(bigen.Uint32(d.r.readx(4))) + ui = uint64(d.r.readUint32()) case mpUint64: - ui = bigen.Uint64(d.r.readx(8)) + ui = d.r.readUint64() case mpInt8: if i := int64(int8(d.r.readn1())); i >= 0 { ui = uint64(i) } else { - d.d.errorf("assigning negative signed value: %v, to unsigned type", i) - return + decErr("Assigning negative signed value: %v, to unsigned type", i) } case mpInt16: - if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 { + if i := int64(int16(d.r.readUint16())); i >= 0 { ui = uint64(i) } else { - d.d.errorf("assigning negative signed value: %v, to unsigned type", i) - return + decErr("Assigning negative signed value: %v, to unsigned type", i) } case mpInt32: - if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 { + if i := int64(int32(d.r.readUint32())); i >= 0 { ui = uint64(i) } else { - d.d.errorf("assigning negative signed value: %v, to unsigned type", i) - return + decErr("Assigning negative signed value: %v, to unsigned type", i) } case mpInt64: - if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 { + if i := int64(d.r.readUint64()); i >= 0 { ui = uint64(i) } else { - d.d.errorf("assigning negative signed value: %v, to unsigned type", i) - return + decErr("Assigning negative signed value: %v, to unsigned type", i) } default: switch { case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: ui = uint64(d.bd) case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - d.d.errorf("assigning negative signed value: %v, to unsigned type", int(d.bd)) - return + decErr("Assigning negative signed value: %v, to unsigned type", int(d.bd)) default: - d.d.errorf("cannot decode unsigned integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) - return + decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { + decErr("Overflow uint value: %v", ui) } } d.bdRead = false @@ -661,173 +463,160 @@ func (d *msgpackDecDriver) DecodeUint64() (ui uint64) { } // float can either be decoded from msgpack type: float, double or intX -func (d *msgpackDecDriver) DecodeFloat64() (f float64) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == mpFloat { - f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - } else if d.bd == mpDouble { - f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - } else { - f = float64(d.DecodeInt64()) +func (d *msgpackDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.bd { + case mpFloat: + f = float64(math.Float32frombits(d.r.readUint32())) + case mpDouble: + f = math.Float64frombits(d.r.readUint64()) + default: + f = float64(d.decodeInt(0)) } + checkOverflowFloat32(f, chkOverflow32) d.bdRead = false return } // bool can be decoded from bool, fixnum 0 or 1. 
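
The integer paths above always pick the smallest wire form that fits: values inside the fixnum ranges stay a single byte, and larger magnitudes fall through to the mpInt8/16/32/64 or mpUint* branches. A minimal round-trip sketch of those paths through the public API, assuming the NewEncoderBytes/NewDecoderBytes constructors this package exposes:

package main

import (
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	var mh codec.MsgpackHandle

	for _, v := range []int64{7, -5, -200, 1 << 40} {
		var buf []byte
		// Per encodeInt/encodeUint above: 7 and -5 land in the one-byte fixnum
		// ranges, -200 takes the mpInt16 branch, and 1<<40 (non-negative values
		// route through encodeUint) takes the mpUint64 branch.
		if err := codec.NewEncoderBytes(&buf, &mh).Encode(v); err != nil {
			panic(err)
		}
		var got int64
		if err := codec.NewDecoderBytes(buf, &mh).Decode(&got); err != nil {
			panic(err)
		}
		fmt.Printf("%12d -> % x -> %d\n", v, buf, got)
	}
}
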
-func (d *msgpackDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == mpFalse || d.bd == 0 { +func (d *msgpackDecDriver) decodeBool() (b bool) { + switch d.bd { + case mpFalse, 0: // b = false - } else if d.bd == mpTrue || d.bd == 1 { + case mpTrue, 1: b = true - } else { - d.d.errorf("cannot decode bool: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) - return + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) } d.bdRead = false return } -func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { - if !d.bdRead { - d.readNextBd() +func (d *msgpackDecDriver) decodeString() (s string) { + clen := d.readContainerLen(msgpackContainerStr) + if clen > 0 { + s = string(d.r.readn(clen)) } + d.bdRead = false + return +} - bd := d.bd +// Callers must check if changed=true (to decide whether to replace the one they have) +func (d *msgpackDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { + // bytes can be decoded from msgpackContainerStr or msgpackContainerBin var clen int - if bd == mpNil { - d.bdRead = false - return - } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { - clen = d.readContainerLen(msgpackContainerBin) // binary - } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { - clen = d.readContainerLen(msgpackContainerStr) // string/raw - } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { - // check if an "array" of uint8's - if zerocopy && len(bs) == 0 { - bs = d.d.b[:] - } - bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) - return - } else { - d.d.errorf("invalid byte descriptor for decoding bytes, got: 0x%x", d.bd) - return + switch d.bd { + case mpBin8, mpBin16, mpBin32: + clen = d.readContainerLen(msgpackContainerBin) + default: + clen = d.readContainerLen(msgpackContainerStr) } - - d.bdRead = false - if zerocopy { - if d.br { - return d.r.readx(uint(clen)) - } else if len(bs) == 0 { - bs = d.d.b[:] + // if clen < 0 { + // changed = true + // panic("length cannot be zero. this cannot be nil.") + // } + if clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + // Return changed=true if length of passed slice diff from length of bytes in stream + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true } + d.r.readb(bs) } - return decByteSlice(d.r, clen, d.h.MaxInitLen, bs) -} - -func (d *msgpackDecDriver) DecodeString() (s string) { - return string(d.DecodeBytes(d.d.b[:], true)) -} - -func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) { - return d.DecodeBytes(d.d.b[:], true) -} - -func (d *msgpackDecDriver) readNextBd() { - d.bd = d.r.readn1() - d.bdRead = true + d.bdRead = false + return } -func (d *msgpackDecDriver) uncacheRead() { +// Every top-level decode funcs (i.e. decodeValue, decode) must call this first. 
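
The decodeString/decodeBytes and decodeNaked paths above decide, for a msgpack raw container decoded into a nil interface{}, whether the caller gets back a string or a []byte based on the handle's RawToString flag. A minimal sketch of that behaviour, assuming the same NewEncoderBytes/NewDecoderBytes helpers:

package main

import (
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	var enc codec.MsgpackHandle
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &enc).Encode("hello"); err != nil {
		panic(err)
	}

	// Decode the same raw container twice into a nil interface{}:
	// with RawToString it surfaces as a string, without it as []byte,
	// matching the branch in decodeNaked above.
	for _, raw2str := range []bool{true, false} {
		h := codec.MsgpackHandle{RawToString: raw2str}
		var v interface{}
		if err := codec.NewDecoderBytes(buf, &h).Decode(&v); err != nil {
			panic(err)
		}
		fmt.Printf("RawToString=%v -> %T %v\n", raw2str, v, v)
	}
}
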
+func (d *msgpackDecDriver) initReadNext() { if d.bdRead { - d.r.unreadn1() - d.bdRead = false - } -} - -func (d *msgpackDecDriver) ContainerType() (vt valueType) { - if !d.bdRead { - d.readNextBd() + return } - bd := d.bd - // if bd == mpNil { - // // nil - // } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { - // // binary - // } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { - // // string/raw - // } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { - // // array - // } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { - // // map - // } - if bd == mpNil { - return valueTypeNil - } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { - return valueTypeBytes - } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { - if d.h.WriteExt || d.h.RawToString { // UTF-8 string (new spec) - return valueTypeString + d.bd = d.r.readn1() + d.bdRead = true + d.bdType = valueTypeUnset +} + +func (d *msgpackDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + bd := d.bd + switch bd { + case mpNil: + d.bdType = valueTypeNil + case mpFalse, mpTrue: + d.bdType = valueTypeBool + case mpFloat, mpDouble: + d.bdType = valueTypeFloat + case mpUint8, mpUint16, mpUint32, mpUint64: + d.bdType = valueTypeUint + case mpInt8, mpInt16, mpInt32, mpInt64: + d.bdType = valueTypeInt + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + d.bdType = valueTypeInt + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + d.bdType = valueTypeInt + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.RawToString { + d.bdType = valueTypeString + } else { + d.bdType = valueTypeBytes + } + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + d.bdType = valueTypeBytes + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + d.bdType = valueTypeArray + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + d.bdType = valueTypeMap + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + d.bdType = valueTypeExt + default: + decErr("currentEncodedType: Undeciphered descriptor: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } } - return valueTypeBytes // raw (old spec) - } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { - return valueTypeArray - } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { - return valueTypeMap } - // else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - // } - return valueTypeUnset + return d.bdType } -func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) { - if !d.bdRead { - d.readNextBd() - } +func (d *msgpackDecDriver) tryDecodeAsNil() bool { if d.bd == mpNil { d.bdRead = false return true } - return + return false } func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { bd := d.bd - if bd == mpNil { + switch { + case bd == mpNil: clen = -1 // to represent nil - } else if bd == ct.b8 { + case bd == ct.b8: clen = int(d.r.readn1()) - } else if bd == ct.b16 { - clen = int(bigen.Uint16(d.r.readx(2))) - } else if bd == ct.b32 { - clen = int(bigen.Uint32(d.r.readx(4))) - } else if (ct.bFixMin & bd) == ct.bFixMin { + case bd == ct.b16: + clen = int(d.r.readUint16()) + case bd == ct.b32: + clen = int(d.r.readUint32()) + case (ct.bFixMin & bd) == 
ct.bFixMin: clen = int(ct.bFixMin ^ bd) - } else { - d.d.errorf("cannot read container length: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) - return + default: + decErr("readContainerLen: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) } d.bdRead = false return } -func (d *msgpackDecDriver) ReadMapStart() int { - if !d.bdRead { - d.readNextBd() - } +func (d *msgpackDecDriver) readMapLen() int { return d.readContainerLen(msgpackContainerMap) } -func (d *msgpackDecDriver) ReadArrayStart() int { - if !d.bdRead { - d.readNextBd() - } +func (d *msgpackDecDriver) readArrayLen() int { return d.readContainerLen(msgpackContainerList) } @@ -848,107 +637,30 @@ func (d *msgpackDecDriver) readExtLen() (clen int) { case mpExt8: clen = int(d.r.readn1()) case mpExt16: - clen = int(bigen.Uint16(d.r.readx(2))) + clen = int(d.r.readUint16()) case mpExt32: - clen = int(bigen.Uint32(d.r.readx(4))) - default: - d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) - return - } - return -} - -func (d *msgpackDecDriver) DecodeTime() (t time.Time) { - // decode time from string bytes or ext - if !d.bdRead { - d.readNextBd() - } - bd := d.bd - var clen int - if bd == mpNil { - d.bdRead = false - return - } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { - clen = d.readContainerLen(msgpackContainerBin) // binary - } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { - clen = d.readContainerLen(msgpackContainerStr) // string/raw - } else { - // expect to see mpFixExt4,-1 OR mpFixExt8,-1 OR mpExt8,12,-1 - d.bdRead = false - b2 := d.r.readn1() - if d.bd == mpFixExt4 && b2 == mpTimeExtTagU { - clen = 4 - } else if d.bd == mpFixExt8 && b2 == mpTimeExtTagU { - clen = 8 - } else if d.bd == mpExt8 && b2 == 12 && d.r.readn1() == mpTimeExtTagU { - clen = 12 - } else { - d.d.errorf("invalid stream for decoding time as extension: got 0x%x, 0x%x", d.bd, b2) - return - } - } - return d.decodeTime(clen) -} - -func (d *msgpackDecDriver) decodeTime(clen int) (t time.Time) { - // bs = d.r.readx(clen) - d.bdRead = false - switch clen { - case 4: - t = time.Unix(int64(bigen.Uint32(d.r.readx(4))), 0).UTC() - case 8: - tv := bigen.Uint64(d.r.readx(8)) - t = time.Unix(int64(tv&0x00000003ffffffff), int64(tv>>34)).UTC() - case 12: - nsec := bigen.Uint32(d.r.readx(4)) - sec := bigen.Uint64(d.r.readx(8)) - t = time.Unix(int64(sec), int64(nsec)).UTC() + clen = int(d.r.readUint32()) default: - d.d.errorf("invalid length of bytes for decoding time - expecting 4 or 8 or 12, got %d", clen) - return + decErr("decoding ext bytes: found unexpected byte: %x", d.bd) } return } -func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if xtag > 0xff { - d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag) - return - } - realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag = uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) - } else { - ext.ReadExt(rv, xbs) - } - return -} - -func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - if !d.bdRead { - d.readNextBd() - } +func (d *msgpackDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { xbd := d.bd - if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { - xbs = d.DecodeBytes(nil, true) - } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || - (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { - xbs = d.DecodeStringAsBytes() - } else { + switch { + 
case xbd == mpBin8, xbd == mpBin16, xbd == mpBin32: + xbs, _ = d.decodeBytes(nil) + case xbd == mpStr8, xbd == mpStr16, xbd == mpStr32, + xbd >= mpFixStrMin && xbd <= mpFixStrMax: + xbs = []byte(d.decodeString()) + default: clen := d.readExtLen() xtag = d.r.readn1() if verifyTag && xtag != tag { - d.d.errorf("wrong extension tag - got %b, expecting %v", xtag, tag) - return - } - if d.br { - xbs = d.r.readx(uint(clen)) - } else { - xbs = decByteSlice(d.r, clen, d.d.h.MaxInitLen, d.d.b[:]) + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) } + xbs = d.r.readn(clen) } d.bdRead = false return @@ -960,55 +672,35 @@ func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs type MsgpackHandle struct { BasicHandle - // NoFixedNum says to output all signed integers as 2-bytes, never as 1-byte fixednum. - NoFixedNum bool - - // WriteExt controls whether the new spec is honored. - // - // With WriteExt=true, we can encode configured extensions with extension tags - // and encode string/[]byte/extensions in a way compatible with the new spec - // but incompatible with the old spec. + // RawToString controls how raw bytes are decoded into a nil interface{}. + RawToString bool + // WriteExt flag supports encoding configured extensions with extension tags. + // It also controls whether other elements of the new spec are encoded (ie Str8). // - // For compatibility with the old spec, set WriteExt=false. + // With WriteExt=false, configured extensions are serialized as raw bytes + // and Str8 is not encoded. // - // With WriteExt=false: - // configured extensions are serialized as raw bytes (not msgpack extensions). - // reserved byte descriptors like Str8 and those enabling the new msgpack Binary type - // are not encoded. + // A stream can still be decoded into a typed value, provided an appropriate value + // is provided, but the type cannot be inferred from the stream. If no appropriate + // type is provided (e.g. decoding into a nil interface{}), you get back + // a []byte or string based on the setting of RawToString. WriteExt bool - - // PositiveIntUnsigned says to encode positive integers as unsigned. 
- PositiveIntUnsigned bool - - binaryEncodingType - noElemSeparators - - // _ [1]uint64 // padding } -// Name returns the name of the handle: msgpack -func (h *MsgpackHandle) Name() string { return "msgpack" } - -// SetBytesExt sets an extension -func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}}) +func (h *MsgpackHandle) newEncDriver(w encWriter) encDriver { + return &msgpackEncDriver{w: w, h: h} } -func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver { - return &msgpackEncDriver{e: e, w: e.w, h: h} +func (h *MsgpackHandle) newDecDriver(r decReader) decDriver { + return &msgpackDecDriver{r: r, h: h} } -func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver { - return &msgpackDecDriver{d: d, h: h, r: d.r, br: d.bytes} +func (h *MsgpackHandle) writeExt() bool { + return h.WriteExt } -func (e *msgpackEncDriver) reset() { - e.w = e.e.w -} - -func (d *msgpackDecDriver) reset() { - d.r, d.br = d.d.r, d.d.bytes - d.bd, d.bdRead = 0, false +func (h *MsgpackHandle) getBasicHandle() *BasicHandle { + return &h.BasicHandle } //-------------------------------------------------- @@ -1029,7 +721,7 @@ func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) err bodyArr = []interface{}{body} } r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} - return c.write(r2, nil, false) + return c.write(r2, nil, false, true) } func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { @@ -1041,7 +733,7 @@ func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) e body = nil } r2 := []interface{}{1, uint32(r.Seq), moe, body} - return c.write(r2, nil, false) + return c.write(r2, nil, false, true) } func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { @@ -1061,7 +753,8 @@ func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { } func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { - if cls := c.cls.load(); cls.closed { + + if c.cls { return io.EOF } @@ -1074,34 +767,28 @@ func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1) // return // } - var ba [1]byte - var n int - for { - n, err = c.r.Read(ba[:]) - if err != nil { - return - } - if n == 1 { - break - } + var b byte + b, err = c.br.ReadByte() + if err != nil { + return } - - var b = ba[0] if b != fia { - err = fmt.Errorf("not array - %s %x/%s", msgBadDesc, b, mpdesc(b)) - } else { - err = c.read(&b) - if err == nil { - if b != expectTypeByte { - err = fmt.Errorf("%s - expecting %v but got %x/%s", - msgBadDesc, expectTypeByte, b, mpdesc(b)) - } else { - err = c.read(msgid) - if err == nil { - err = c.read(methodOrError) - } - } - } + err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) + return + } + + if err = c.read(&b); err != nil { + return + } + if b != expectTypeByte { + err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. 
Received %v", expectTypeByte, b) + return + } + if err = c.read(msgid); err != nil { + return + } + if err = c.read(methodOrError); err != nil { + return } return } @@ -1114,8 +801,7 @@ type msgpackSpecRpc struct{} // MsgpackSpecRpc implements Rpc using the communication protocol defined in // the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . -// -// See GoRpc documentation, for information on buffering for better performance. +// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. var MsgpackSpecRpc msgpackSpecRpc func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/test.py b/src/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py similarity index 69% rename from src/vendor/github.com/hashicorp/go-msgpack/codec/test.py rename to src/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py index 800376f6..e933838c 100644 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/test.py +++ b/src/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py @@ -4,14 +4,7 @@ # A Test calls this internally to create the golden files # So it can process them (so we don't have to checkin the files). -# Ensure msgpack-python and cbor are installed first, using: -# sudo apt-get install python-dev -# sudo apt-get install python-pip -# pip install --user msgpack-python msgpack-rpc-python cbor - -# Ensure all "string" keys are utf strings (else encoded as bytes) - -import cbor, msgpack, msgpackrpc, sys, os, threading +import msgpack, msgpackrpc, sys, os, threading def get_test_data_list(): # get list with all primitive types, and a combo type @@ -28,52 +21,43 @@ def get_test_data_list(): -3232.0, -6464646464.0, 3232.0, - 6464.0, 6464646464.0, False, True, - u"null", None, - u"some&day>some 0 @@ -96,14 +80,14 @@ def myStopRpcServer(): server.start() def doRpcClientToPythonSvc(port): - address = msgpackrpc.Address('127.0.0.1', port) + address = msgpackrpc.Address('localhost', port) client = msgpackrpc.Client(address, unpack_encoding='utf-8') print client.call("Echo123", "A1", "B2", "C3") print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) def doRpcClientToGoSvc(port): # print ">>>> port: ", port, " <<<<<" - address = msgpackrpc.Address('127.0.0.1', port) + address = msgpackrpc.Address('localhost', port) client = msgpackrpc.Client(address, unpack_encoding='utf-8') print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) @@ -118,7 +102,7 @@ def doMain(args): elif len(args) == 2 and args[0] == "rpc-client-go-service": doRpcClientToGoSvc(int(args[1])) else: - print("Usage: test.py " + + print("Usage: msgpack_test.py " + "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") if __name__ == "__main__": diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go index 39250881..d014dbdc 100644 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go +++ b/src/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go @@ -1,150 +1,99 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. 
package codec import ( "bufio" - "errors" "io" "net/rpc" + "sync" ) -var errRpcJsonNeedsTermWhitespace = errors.New("rpc requires JsonHandle with TermWhitespace=true") - // Rpc provides a rpc Server or Client Codec for rpc communication. type Rpc interface { ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec } -// RPCOptions holds options specific to rpc functionality -type RPCOptions struct { - // RPCNoBuffer configures whether we attempt to buffer reads and writes during RPC calls. - // - // Set RPCNoBuffer=true to turn buffering off. - // Buffering can still be done if buffered connections are passed in, or - // buffering is configured on the handle. - RPCNoBuffer bool +// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer +// used by the rpc connection. It accomodates use-cases where the connection +// should be used by rpc and non-rpc functions, e.g. streaming a file after +// sending an rpc response. +type RpcCodecBuffered interface { + BufferedReader() *bufio.Reader + BufferedWriter() *bufio.Writer } +// ------------------------------------- + // rpcCodec defines the struct members and common methods. type rpcCodec struct { - c io.Closer - r io.Reader - w io.Writer - f ioFlusher - + rwc io.ReadWriteCloser dec *Decoder enc *Encoder - // bw *bufio.Writer - // br *bufio.Reader - h Handle - - cls atomicClsErr + bw *bufio.Writer + br *bufio.Reader + mu sync.Mutex + cls bool } func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { - // return newRPCCodec2(bufio.NewReader(conn), bufio.NewWriter(conn), conn, h) - return newRPCCodec2(conn, conn, conn, h) -} - -func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec { - // defensive: ensure that jsonH has TermWhitespace turned on. - if jsonH, ok := h.(*JsonHandle); ok && !jsonH.TermWhitespace { - panic(errRpcJsonNeedsTermWhitespace) - } - // always ensure that we use a flusher, and always flush what was written to the connection. - // we lose nothing by using a buffered writer internally. 
- f, ok := w.(ioFlusher) - bh := basicHandle(h) - if !bh.RPCNoBuffer { - if bh.WriterBufferSize <= 0 { - if !ok { - bw := bufio.NewWriter(w) - f, w = bw, bw - } - } - if bh.ReaderBufferSize <= 0 { - if _, ok = w.(ioPeeker); !ok { - if _, ok = w.(ioBuffered); !ok { - br := bufio.NewReader(r) - r = br - } - } - } - } + bw := bufio.NewWriter(conn) + br := bufio.NewReader(conn) return rpcCodec{ - c: c, - w: w, - r: r, - f: f, - h: h, - enc: NewEncoder(w, h), - dec: NewDecoder(r, h), + rwc: conn, + bw: bw, + br: br, + enc: NewEncoder(bw, h), + dec: NewDecoder(br, h), } } -func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2 bool) (err error) { - if c.c != nil { - cls := c.cls.load() - if cls.closed { - return cls.errClosed - } +func (c *rpcCodec) BufferedReader() *bufio.Reader { + return c.br +} + +func (c *rpcCodec) BufferedWriter() *bufio.Writer { + return c.bw +} + +func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) { + if c.cls { + return io.EOF } - err = c.enc.Encode(obj1) - if err == nil { - if writeObj2 { - err = c.enc.Encode(obj2) - } - // if err == nil && c.f != nil { - // err = c.f.Flush() - // } + if err = c.enc.Encode(obj1); err != nil { + return } - if c.f != nil { - if err == nil { - err = c.f.Flush() - } else { - _ = c.f.Flush() // swallow flush error, so we maintain prior error on write + if writeObj2 { + if err = c.enc.Encode(obj2); err != nil { + return } } + if doFlush && c.bw != nil { + return c.bw.Flush() + } return } -func (c *rpcCodec) swallow(err *error) { - defer panicToErr(c.dec, err) - c.dec.swallow() -} - func (c *rpcCodec) read(obj interface{}) (err error) { - if c.c != nil { - cls := c.cls.load() - if cls.closed { - return cls.errClosed - } + if c.cls { + return io.EOF } - //If nil is passed in, we should read and discard + //If nil is passed in, we should still attempt to read content to nowhere. if obj == nil { - // var obj2 interface{} - // return c.dec.Decode(&obj2) - c.swallow(&err) - return + var obj2 interface{} + return c.dec.Decode(&obj2) } return c.dec.Decode(obj) } func (c *rpcCodec) Close() error { - if c.c == nil { - return nil - } - cls := c.cls.load() - if cls.closed { - return cls.errClosed + if c.cls { + return io.EOF } - cls.errClosed = c.c.Close() - cls.closed = true - c.cls.store(cls) - return cls.errClosed + c.cls = true + return c.rwc.Close() } func (c *rpcCodec) ReadResponseBody(body interface{}) error { @@ -158,11 +107,16 @@ type goRpcCodec struct { } func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { - return c.write(r, body, true) + // Must protect for concurrent access as per API + c.mu.Lock() + defer c.mu.Unlock() + return c.write(r, body, true, true) } func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { - return c.write(r, body, true) + c.mu.Lock() + defer c.mu.Unlock() + return c.write(r, body, true, true) } func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { @@ -184,36 +138,7 @@ func (c *goRpcCodec) ReadRequestBody(body interface{}) error { type goRpc struct{} // GoRpc implements Rpc using the communication protocol defined in net/rpc package. -// -// Note: network connection (from net.Dial, of type io.ReadWriteCloser) is not buffered. -// -// For performance, you should configure WriterBufferSize and ReaderBufferSize on the handle. -// This ensures we use an adequate buffer during reading and writing. -// If not configured, we will internally initialize and use a buffer during reads and writes. 
-// This can be turned off via the RPCNoBuffer option on the Handle. -// var handle codec.JsonHandle -// handle.RPCNoBuffer = true // turns off attempt by rpc module to initialize a buffer -// -// Example 1: one way of configuring buffering explicitly: -// var handle codec.JsonHandle // codec handle -// handle.ReaderBufferSize = 1024 -// handle.WriterBufferSize = 1024 -// var conn io.ReadWriteCloser // connection got from a socket -// var serverCodec = GoRpc.ServerCodec(conn, handle) -// var clientCodec = GoRpc.ClientCodec(conn, handle) -// -// Example 2: you can also explicitly create a buffered connection yourself, -// and not worry about configuring the buffer sizes in the Handle. -// var handle codec.Handle // codec handle -// var conn io.ReadWriteCloser // connection got from a socket -// var bufconn = struct { // bufconn here is a buffered io.ReadWriteCloser -// io.Closer -// *bufio.Reader -// *bufio.Writer -// }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)} -// var serverCodec = GoRpc.ServerCodec(bufconn, handle) -// var clientCodec = GoRpc.ClientCodec(bufconn, handle) -// +// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. var GoRpc goRpc func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { @@ -223,3 +148,5 @@ func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { return &goRpcCodec{newRPCCodec(conn, h)} } + +var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/simple.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/simple.go index a3257c1a..9e4d148a 100644 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/simple.go +++ b/src/vendor/github.com/hashicorp/go-msgpack/codec/simple.go @@ -1,13 +1,9 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. package codec -import ( - "math" - "reflect" - "time" -) +import "math" const ( _ uint8 = iota @@ -21,8 +17,6 @@ const ( simpleVdPosInt = 8 simpleVdNegInt = 12 - simpleVdTime = 24 - // containers: each lasts for 4 (ie n, n+1, n+2, ... 
n+7) simpleVdString = 216 simpleVdByteArray = 224 @@ -32,27 +26,23 @@ const ( ) type simpleEncDriver struct { - noBuiltInTypes - // encNoSeparator - e *Encoder h *SimpleHandle - w *encWriterSwitch - b [8]byte - // c containerState - encDriverTrackContainerWriter - // encDriverNoopContainerWriter - _ [3]uint64 // padding + w encWriter + //b [8]byte +} + +func (e *simpleEncDriver) isBuiltinType(rt uintptr) bool { + return false } -func (e *simpleEncDriver) EncodeNil() { +func (e *simpleEncDriver) encodeBuiltin(rt uintptr, v interface{}) { +} + +func (e *simpleEncDriver) encodeNil() { e.w.writen1(simpleVdNil) } -func (e *simpleEncDriver) EncodeBool(b bool) { - if e.h.EncZeroValuesAsNil && e.c != containerMapKey && !b { - e.EncodeNil() - return - } +func (e *simpleEncDriver) encodeBool(b bool) { if b { e.w.writen1(simpleVdTrue) } else { @@ -60,25 +50,17 @@ func (e *simpleEncDriver) EncodeBool(b bool) { } } -func (e *simpleEncDriver) EncodeFloat32(f float32) { - if e.h.EncZeroValuesAsNil && e.c != containerMapKey && f == 0.0 { - e.EncodeNil() - return - } +func (e *simpleEncDriver) encodeFloat32(f float32) { e.w.writen1(simpleVdFloat32) - bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) + e.w.writeUint32(math.Float32bits(f)) } -func (e *simpleEncDriver) EncodeFloat64(f float64) { - if e.h.EncZeroValuesAsNil && e.c != containerMapKey && f == 0.0 { - e.EncodeNil() - return - } +func (e *simpleEncDriver) encodeFloat64(f float64) { e.w.writen1(simpleVdFloat64) - bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f)) + e.w.writeUint64(math.Float64bits(f)) } -func (e *simpleEncDriver) EncodeInt(v int64) { +func (e *simpleEncDriver) encodeInt(v int64) { if v < 0 { e.encUint(uint64(-v), simpleVdNegInt) } else { @@ -86,185 +68,123 @@ func (e *simpleEncDriver) EncodeInt(v int64) { } } -func (e *simpleEncDriver) EncodeUint(v uint64) { +func (e *simpleEncDriver) encodeUint(v uint64) { e.encUint(v, simpleVdPosInt) } func (e *simpleEncDriver) encUint(v uint64, bd uint8) { - if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == 0 { - e.EncodeNil() - return - } - if v <= math.MaxUint8 { + switch { + case v <= math.MaxUint8: e.w.writen2(bd, uint8(v)) - } else if v <= math.MaxUint16 { + case v <= math.MaxUint16: e.w.writen1(bd + 1) - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: e.w.writen1(bd + 2) - bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) - } else { // if v <= math.MaxUint64 { + e.w.writeUint32(uint32(v)) + case v <= math.MaxUint64: e.w.writen1(bd + 3) - bigenHelper{e.b[:8], e.w}.writeUint64(v) + e.w.writeUint64(v) } } func (e *simpleEncDriver) encLen(bd byte, length int) { - if length == 0 { + switch { + case length == 0: e.w.writen1(bd) - } else if length <= math.MaxUint8 { + case length <= math.MaxUint8: e.w.writen1(bd + 1) e.w.writen1(uint8(length)) - } else if length <= math.MaxUint16 { + case length <= math.MaxUint16: e.w.writen1(bd + 2) - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length)) - } else if int64(length) <= math.MaxUint32 { + e.w.writeUint16(uint16(length)) + case int64(length) <= math.MaxUint32: e.w.writen1(bd + 3) - bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length)) - } else { + e.w.writeUint32(uint32(length)) + default: e.w.writen1(bd + 4) - bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length)) + e.w.writeUint64(uint64(length)) } } -func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) { - bs := ext.WriteExt(rv) - if bs == nil { - 
e.EncodeNil() - return - } - e.encodeExtPreamble(uint8(xtag), len(bs)) - e.w.writeb(bs) -} - -func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { - e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) - e.w.writeb(re.Data) -} - func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) { e.encLen(simpleVdExt, length) e.w.writen1(xtag) } -func (e *simpleEncDriver) WriteArrayStart(length int) { - e.c = containerArrayStart +func (e *simpleEncDriver) encodeArrayPreamble(length int) { e.encLen(simpleVdArray, length) } -func (e *simpleEncDriver) WriteMapStart(length int) { - e.c = containerMapStart +func (e *simpleEncDriver) encodeMapPreamble(length int) { e.encLen(simpleVdMap, length) } -// func (e *simpleEncDriver) EncodeSymbol(v string) { -// e.EncodeStringEnc(cUTF8, v) -// } - -func (e *simpleEncDriver) EncodeStringEnc(c charEncoding, v string) { - if false && e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == "" { - e.EncodeNil() - return - } +func (e *simpleEncDriver) encodeString(c charEncoding, v string) { e.encLen(simpleVdString, len(v)) e.w.writestr(v) } -func (e *simpleEncDriver) EncodeString(c charEncoding, v string) { - e.EncodeStringEnc(c, v) +func (e *simpleEncDriver) encodeSymbol(v string) { + e.encodeString(c_UTF8, v) } -func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - e.EncodeStringBytesRaw(v) -} - -func (e *simpleEncDriver) EncodeStringBytesRaw(v []byte) { - // if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == nil { - if v == nil { - e.EncodeNil() - return - } +func (e *simpleEncDriver) encodeStringBytes(c charEncoding, v []byte) { e.encLen(simpleVdByteArray, len(v)) e.w.writeb(v) } -func (e *simpleEncDriver) EncodeTime(t time.Time) { - // if e.h.EncZeroValuesAsNil && e.c != containerMapKey && t.IsZero() { - if t.IsZero() { - e.EncodeNil() - return - } - v, err := t.MarshalBinary() - if err != nil { - e.e.errorv(err) - return - } - // time.Time marshalbinary takes about 14 bytes. - e.w.writen2(simpleVdTime, uint8(len(v))) - e.w.writeb(v) -} - //------------------------------------ type simpleDecDriver struct { - d *Decoder h *SimpleHandle - r *decReaderSwitch + r decReader bdRead bool + bdType valueType bd byte - br bool // a bytes reader? 
- c containerState - // b [scratchByteArrayLen]byte - noBuiltInTypes - // noStreamingCodec - decDriverNoopContainerReader - // _ [3]uint64 // padding + //b [8]byte } -func (d *simpleDecDriver) readNextBd() { - d.bd = d.r.readn1() - d.bdRead = true -} - -func (d *simpleDecDriver) uncacheRead() { +func (d *simpleDecDriver) initReadNext() { if d.bdRead { - d.r.unreadn1() - d.bdRead = false - } -} - -func (d *simpleDecDriver) ContainerType() (vt valueType) { - if !d.bdRead { - d.readNextBd() + return } - switch d.bd { - case simpleVdNil: - return valueTypeNil - case simpleVdByteArray, simpleVdByteArray + 1, - simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - return valueTypeBytes - case simpleVdString, simpleVdString + 1, - simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: - return valueTypeString - case simpleVdArray, simpleVdArray + 1, - simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: - return valueTypeArray - case simpleVdMap, simpleVdMap + 1, - simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: - return valueTypeMap - // case simpleVdTime: - // return valueTypeTime + d.bd = d.r.readn1() + d.bdRead = true + d.bdType = valueTypeUnset +} + +func (d *simpleDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + switch d.bd { + case simpleVdNil: + d.bdType = valueTypeNil + case simpleVdTrue, simpleVdFalse: + d.bdType = valueTypeBool + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + d.bdType = valueTypeUint + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + d.bdType = valueTypeInt + case simpleVdFloat32, simpleVdFloat64: + d.bdType = valueTypeFloat + case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + d.bdType = valueTypeString + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + d.bdType = valueTypeBytes + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + d.bdType = valueTypeExt + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + d.bdType = valueTypeArray + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + d.bdType = valueTypeMap + default: + decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.bd) + } } - // else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - // } - return valueTypeUnset + return d.bdType } -func (d *simpleDecDriver) TryDecodeAsNil() bool { - if !d.bdRead { - d.readNextBd() - } +func (d *simpleDecDriver) tryDecodeAsNil() bool { if d.bd == simpleVdNil { d.bdRead = false return true @@ -272,137 +192,112 @@ func (d *simpleDecDriver) TryDecodeAsNil() bool { return false } -func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) { - if !d.bdRead { - d.readNextBd() - } +func (d *simpleDecDriver) isBuiltinType(rt uintptr) bool { + return false +} + +func (d *simpleDecDriver) decodeBuiltin(rt uintptr, v interface{}) { +} + +func (d *simpleDecDriver) decIntAny() (ui uint64, i int64, neg bool) { switch d.bd { case simpleVdPosInt: ui = uint64(d.r.readn1()) + i = int64(ui) case simpleVdPosInt + 1: - ui = uint64(bigen.Uint16(d.r.readx(2))) + ui = uint64(d.r.readUint16()) + i = int64(ui) case simpleVdPosInt + 2: - ui = uint64(bigen.Uint32(d.r.readx(4))) + ui = uint64(d.r.readUint32()) + i = int64(ui) case simpleVdPosInt + 3: - ui = uint64(bigen.Uint64(d.r.readx(8))) + ui = 
uint64(d.r.readUint64()) + i = int64(ui) case simpleVdNegInt: ui = uint64(d.r.readn1()) + i = -(int64(ui)) neg = true case simpleVdNegInt + 1: - ui = uint64(bigen.Uint16(d.r.readx(2))) + ui = uint64(d.r.readUint16()) + i = -(int64(ui)) neg = true case simpleVdNegInt + 2: - ui = uint64(bigen.Uint32(d.r.readx(4))) + ui = uint64(d.r.readUint32()) + i = -(int64(ui)) neg = true case simpleVdNegInt + 3: - ui = uint64(bigen.Uint64(d.r.readx(8))) + ui = uint64(d.r.readUint64()) + i = -(int64(ui)) neg = true default: - d.d.errorf("integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) - return + decErr("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) } // don't do this check, because callers may only want the unsigned value. // if ui > math.MaxInt64 { - // d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui) - // return + // decErr("decIntAny: Integer out of range for signed int64: %v", ui) // } return } -func (d *simpleDecDriver) DecodeInt64() (i int64) { - ui, neg := d.decCheckInteger() - i = chkOvf.SignedIntV(ui) - if neg { - i = -i - } +func (d *simpleDecDriver) decodeInt(bitsize uint8) (i int64) { + _, i, _ = d.decIntAny() + checkOverflow(0, i, bitsize) d.bdRead = false return } -func (d *simpleDecDriver) DecodeUint64() (ui uint64) { - ui, neg := d.decCheckInteger() +func (d *simpleDecDriver) decodeUint(bitsize uint8) (ui uint64) { + ui, i, neg := d.decIntAny() if neg { - d.d.errorf("assigning negative signed value to unsigned type") - return + decErr("Assigning negative signed value: %v, to unsigned type", i) } + checkOverflow(ui, 0, bitsize) d.bdRead = false return } -func (d *simpleDecDriver) DecodeFloat64() (f float64) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdFloat32 { - f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - } else if d.bd == simpleVdFloat64 { - f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - } else { +func (d *simpleDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.bd { + case simpleVdFloat32: + f = float64(math.Float32frombits(d.r.readUint32())) + case simpleVdFloat64: + f = math.Float64frombits(d.r.readUint64()) + default: if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { - f = float64(d.DecodeInt64()) + _, i, _ := d.decIntAny() + f = float64(i) } else { - d.d.errorf("float only valid from float32/64: Invalid descriptor: %v", d.bd) - return + decErr("Float only valid from float32/64: Invalid descriptor: %v", d.bd) } } + checkOverflowFloat32(f, chkOverflow32) d.bdRead = false return } // bool can be decoded from bool only (single byte). 
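
The simple-format decode paths above mirror the msgpack ones: every value is a descriptor byte (simpleVd*), optionally followed by a length and payload sized by decLen/encLen. A minimal end-to-end sketch using SimpleHandle, assuming the NewEncoderBytes/NewDecoderBytes constructors from this package:

package main

import (
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

type point struct {
	X, Y int
}

func main() {
	var sh codec.SimpleHandle

	in := map[string]point{"origin": {0, 0}, "unit": {1, 1}}
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &sh).Encode(in); err != nil {
		panic(err)
	}

	// Each element on the wire is [descriptor byte] [optional length] [payload],
	// as laid out by encLen/encUint in the encoder half of this file.
	var out map[string]point
	if err := codec.NewDecoderBytes(buf, &sh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes on the wire, decoded: %+v\n", len(buf), out)
}
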
-func (d *simpleDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdTrue { +func (d *simpleDecDriver) decodeBool() (b bool) { + switch d.bd { + case simpleVdTrue: b = true - } else if d.bd == simpleVdFalse { - } else { - d.d.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd) - return + case simpleVdFalse: + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) } d.bdRead = false return } -func (d *simpleDecDriver) ReadMapStart() (length int) { - if !d.bdRead { - d.readNextBd() - } +func (d *simpleDecDriver) readMapLen() (length int) { d.bdRead = false - d.c = containerMapStart return d.decLen() } -func (d *simpleDecDriver) ReadArrayStart() (length int) { - if !d.bdRead { - d.readNextBd() - } +func (d *simpleDecDriver) readArrayLen() (length int) { d.bdRead = false - d.c = containerArrayStart return d.decLen() } -func (d *simpleDecDriver) ReadArrayElem() { - d.c = containerArrayElem -} - -func (d *simpleDecDriver) ReadArrayEnd() { - d.c = containerArrayEnd -} - -func (d *simpleDecDriver) ReadMapElemKey() { - d.c = containerMapKey -} - -func (d *simpleDecDriver) ReadMapElemValue() { - d.c = containerMapValue -} - -func (d *simpleDecDriver) ReadMapEnd() { - d.c = containerMapEnd -} - func (d *simpleDecDriver) decLen() int { switch d.bd % 8 { case 0: @@ -410,196 +305,116 @@ func (d *simpleDecDriver) decLen() int { case 1: return int(d.r.readn1()) case 2: - return int(bigen.Uint16(d.r.readx(2))) + return int(d.r.readUint16()) case 3: - ui := uint64(bigen.Uint32(d.r.readx(4))) - if chkOvf.Uint(ui, intBitsize) { - d.d.errorf("overflow integer: %v", ui) - return 0 - } + ui := uint64(d.r.readUint32()) + checkOverflow(ui, 0, intBitsize) return int(ui) case 4: - ui := bigen.Uint64(d.r.readx(8)) - if chkOvf.Uint(ui, intBitsize) { - d.d.errorf("overflow integer: %v", ui) - return 0 - } + ui := d.r.readUint64() + checkOverflow(ui, 0, intBitsize) return int(ui) } - d.d.errorf("cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8) + decErr("decLen: Cannot read length: bd%8 must be in range 0..4. 
Got: %d", d.bd%8) return -1 } -func (d *simpleDecDriver) DecodeString() (s string) { - return string(d.DecodeBytes(d.d.b[:], true)) -} - -func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) { - return d.DecodeBytes(d.d.b[:], true) -} - -func (d *simpleDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdNil { - d.bdRead = false - return - } - // check if an "array" of uint8's (see ContainerType for how to infer if an array) - if d.bd >= simpleVdArray && d.bd <= simpleVdMap+4 { - if len(bs) == 0 && zerocopy { - bs = d.d.b[:] - } - bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) - return - } - - clen := d.decLen() - d.bdRead = false - if zerocopy { - if d.br { - return d.r.readx(uint(clen)) - } else if len(bs) == 0 { - bs = d.d.b[:] - } - } - return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) -} - -func (d *simpleDecDriver) DecodeTime() (t time.Time) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdNil { - d.bdRead = false - return - } - if d.bd != simpleVdTime { - d.d.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd) - return - } +func (d *simpleDecDriver) decodeString() (s string) { + s = string(d.r.readn(d.decLen())) d.bdRead = false - clen := int(d.r.readn1()) - b := d.r.readx(uint(clen)) - if err := (&t).UnmarshalBinary(b); err != nil { - d.d.errorv(err) - } return } -func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if xtag > 0xff { - d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag) - return - } - realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag = uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) - } else { - ext.ReadExt(rv, xbs) +func (d *simpleDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { + if clen := d.decLen(); clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true + } + d.r.readb(bs) } + d.bdRead = false return } -func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - if !d.bdRead { - d.readNextBd() - } +func (d *simpleDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { switch d.bd { case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: l := d.decLen() xtag = d.r.readn1() if verifyTag && xtag != tag { - d.d.errorf("wrong extension tag. Got %b. Expecting: %v", xtag, tag) - return + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) } - if d.br { - xbs = d.r.readx(uint(l)) - } else { - xbs = decByteSlice(d.r, l, d.d.h.MaxInitLen, d.d.b[:]) - } - case simpleVdByteArray, simpleVdByteArray + 1, - simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - xbs = d.DecodeBytes(nil, true) + xbs = d.r.readn(l) + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + xbs, _ = d.decodeBytes(nil) default: - d.d.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd) - return + decErr("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.bd) } d.bdRead = false return } -func (d *simpleDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - - n := d.d.naked() - var decodeFurther bool +func (d *simpleDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { + d.initReadNext() switch d.bd { case simpleVdNil: - n.v = valueTypeNil + vt = valueTypeNil case simpleVdFalse: - n.v = valueTypeBool - n.b = false + vt = valueTypeBool + v = false case simpleVdTrue: - n.v = valueTypeBool - n.b = true + vt = valueTypeBool + v = true case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: - if d.h.SignedInteger { - n.v = valueTypeInt - n.i = d.DecodeInt64() - } else { - n.v = valueTypeUint - n.u = d.DecodeUint64() - } + vt = valueTypeUint + ui, _, _ := d.decIntAny() + v = ui case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: - n.v = valueTypeInt - n.i = d.DecodeInt64() + vt = valueTypeInt + _, i, _ := d.decIntAny() + v = i case simpleVdFloat32: - n.v = valueTypeFloat - n.f = d.DecodeFloat64() + vt = valueTypeFloat + v = d.decodeFloat(true) case simpleVdFloat64: - n.v = valueTypeFloat - n.f = d.DecodeFloat64() - case simpleVdTime: - n.v = valueTypeTime - n.t = d.DecodeTime() - case simpleVdString, simpleVdString + 1, - simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: - n.v = valueTypeString - n.s = d.DecodeString() - case simpleVdByteArray, simpleVdByteArray + 1, - simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - decNakedReadRawBytes(d, d.d, n, d.h.RawToString) + vt = valueTypeFloat + v = d.decodeFloat(false) + case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + vt = valueTypeString + v = d.decodeString() + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + vt = valueTypeBytes + v, _ = d.decodeBytes(nil) case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - n.v = valueTypeExt + vt = valueTypeExt l := d.decLen() - n.u = uint64(d.r.readn1()) - if d.br { - n.l = d.r.readx(uint(l)) - } else { - n.l = decByteSlice(d.r, l, d.d.h.MaxInitLen, d.d.b[:]) - } - case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, - simpleVdArray + 3, simpleVdArray + 4: - n.v = valueTypeArray + var re RawExt + re.Tag = d.r.readn1() + re.Data = d.r.readn(l) + v = &re + vt = valueTypeExt + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + vt = valueTypeArray decodeFurther = true case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: - n.v = valueTypeMap + vt = valueTypeMap decodeFurther = true default: - d.d.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd) + decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.bd) } if !decodeFurther { d.bdRead = false } + return } //------------------------------------ @@ -607,12 +422,12 @@ func (d *simpleDecDriver) DecodeNaked() { // SimpleHandle is a Handle for a very simple encoding format. // // simple is a simplistic codec similar to binc, but not as compact. -// - Encoding of a value is always preceded by the descriptor byte (bd) +// - Encoding of a value is always preceeded by the descriptor byte (bd) // - True, false, nil are encoded fully in 1 byte (the descriptor) // - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). // There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. 
// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) -// - Length of containers (strings, bytes, array, map, extensions) +// - Lenght of containers (strings, bytes, array, map, extensions) // are encoded in 0, 1, 2, 4 or 8 bytes. // Zero-length containers have no length encoded. // For others, the number of bytes is given by pow(2, bd%3) @@ -620,46 +435,26 @@ func (d *simpleDecDriver) DecodeNaked() { // - arrays are encoded as [bd] [length] [value]... // - extensions are encoded as [bd] [length] [tag] [byte]... // - strings/bytearrays are encoded as [bd] [length] [byte]... -// - time.Time are encoded as [bd] [length] [byte]... // // The full spec will be published soon. type SimpleHandle struct { BasicHandle - binaryEncodingType - noElemSeparators - // EncZeroValuesAsNil says to encode zero values for numbers, bool, string, etc as nil - EncZeroValuesAsNil bool - - // _ [1]uint64 // padding } -// Name returns the name of the handle: simple -func (h *SimpleHandle) Name() string { return "simple" } - -// SetBytesExt sets an extension -func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}}) -} - -func (h *SimpleHandle) hasElemSeparators() bool { return true } // as it implements Write(Map|Array)XXX - -func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver { - return &simpleEncDriver{e: e, w: e.w, h: h} +func (h *SimpleHandle) newEncDriver(w encWriter) encDriver { + return &simpleEncDriver{w: w, h: h} } -func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver { - return &simpleDecDriver{d: d, h: h, r: d.r, br: d.bytes} +func (h *SimpleHandle) newDecDriver(r decReader) decDriver { + return &simpleDecDriver{r: r, h: h} } -func (e *simpleEncDriver) reset() { - e.c = 0 - e.w = e.e.w +func (_ *SimpleHandle) writeExt() bool { + return true } -func (d *simpleDecDriver) reset() { - d.c = 0 - d.r, d.br = d.d.r, d.d.bytes - d.bd, d.bdRead = 0, false +func (h *SimpleHandle) getBasicHandle() *BasicHandle { + return &h.BasicHandle } var _ decDriver = (*simpleDecDriver)(nil) diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/test-cbor-goldens.json b/src/vendor/github.com/hashicorp/go-msgpack/codec/test-cbor-goldens.json deleted file mode 100644 index 90285867..00000000 --- a/src/vendor/github.com/hashicorp/go-msgpack/codec/test-cbor-goldens.json +++ /dev/null @@ -1,639 +0,0 @@ -[ - { - "cbor": "AA==", - "hex": "00", - "roundtrip": true, - "decoded": 0 - }, - { - "cbor": "AQ==", - "hex": "01", - "roundtrip": true, - "decoded": 1 - }, - { - "cbor": "Cg==", - "hex": "0a", - "roundtrip": true, - "decoded": 10 - }, - { - "cbor": "Fw==", - "hex": "17", - "roundtrip": true, - "decoded": 23 - }, - { - "cbor": "GBg=", - "hex": "1818", - "roundtrip": true, - "decoded": 24 - }, - { - "cbor": "GBk=", - "hex": "1819", - "roundtrip": true, - "decoded": 25 - }, - { - "cbor": "GGQ=", - "hex": "1864", - "roundtrip": true, - "decoded": 100 - }, - { - "cbor": "GQPo", - "hex": "1903e8", - "roundtrip": true, - "decoded": 1000 - }, - { - "cbor": "GgAPQkA=", - "hex": "1a000f4240", - "roundtrip": true, - "decoded": 1000000 - }, - { - "cbor": "GwAAAOjUpRAA", - "hex": "1b000000e8d4a51000", - "roundtrip": true, - "decoded": 1000000000000 - }, - { - "cbor": "G///////////", - "hex": "1bffffffffffffffff", - "roundtrip": true, - "decoded": 18446744073709551615 - }, - { - "cbor": "wkkBAAAAAAAAAAA=", - "hex": "c249010000000000000000", - "roundtrip": true, - "decoded": 18446744073709551616 - }, - { - "cbor": 
"O///////////", - "hex": "3bffffffffffffffff", - "roundtrip": true, - "decoded": -18446744073709551616, - "skip": true - }, - { - "cbor": "w0kBAAAAAAAAAAA=", - "hex": "c349010000000000000000", - "roundtrip": true, - "decoded": -18446744073709551617 - }, - { - "cbor": "IA==", - "hex": "20", - "roundtrip": true, - "decoded": -1 - }, - { - "cbor": "KQ==", - "hex": "29", - "roundtrip": true, - "decoded": -10 - }, - { - "cbor": "OGM=", - "hex": "3863", - "roundtrip": true, - "decoded": -100 - }, - { - "cbor": "OQPn", - "hex": "3903e7", - "roundtrip": true, - "decoded": -1000 - }, - { - "cbor": "+QAA", - "hex": "f90000", - "roundtrip": true, - "decoded": 0.0 - }, - { - "cbor": "+YAA", - "hex": "f98000", - "roundtrip": true, - "decoded": -0.0 - }, - { - "cbor": "+TwA", - "hex": "f93c00", - "roundtrip": true, - "decoded": 1.0 - }, - { - "cbor": "+z/xmZmZmZma", - "hex": "fb3ff199999999999a", - "roundtrip": true, - "decoded": 1.1 - }, - { - "cbor": "+T4A", - "hex": "f93e00", - "roundtrip": true, - "decoded": 1.5 - }, - { - "cbor": "+Xv/", - "hex": "f97bff", - "roundtrip": true, - "decoded": 65504.0 - }, - { - "cbor": "+kfDUAA=", - "hex": "fa47c35000", - "roundtrip": true, - "decoded": 100000.0 - }, - { - "cbor": "+n9///8=", - "hex": "fa7f7fffff", - "roundtrip": true, - "decoded": 3.4028234663852886e+38 - }, - { - "cbor": "+3435DyIAHWc", - "hex": "fb7e37e43c8800759c", - "roundtrip": true, - "decoded": 1.0e+300 - }, - { - "cbor": "+QAB", - "hex": "f90001", - "roundtrip": true, - "decoded": 5.960464477539063e-08 - }, - { - "cbor": "+QQA", - "hex": "f90400", - "roundtrip": true, - "decoded": 6.103515625e-05 - }, - { - "cbor": "+cQA", - "hex": "f9c400", - "roundtrip": true, - "decoded": -4.0 - }, - { - "cbor": "+8AQZmZmZmZm", - "hex": "fbc010666666666666", - "roundtrip": true, - "decoded": -4.1 - }, - { - "cbor": "+XwA", - "hex": "f97c00", - "roundtrip": true, - "diagnostic": "Infinity" - }, - { - "cbor": "+X4A", - "hex": "f97e00", - "roundtrip": true, - "diagnostic": "NaN" - }, - { - "cbor": "+fwA", - "hex": "f9fc00", - "roundtrip": true, - "diagnostic": "-Infinity" - }, - { - "cbor": "+n+AAAA=", - "hex": "fa7f800000", - "roundtrip": false, - "diagnostic": "Infinity" - }, - { - "cbor": "+n/AAAA=", - "hex": "fa7fc00000", - "roundtrip": false, - "diagnostic": "NaN" - }, - { - "cbor": "+v+AAAA=", - "hex": "faff800000", - "roundtrip": false, - "diagnostic": "-Infinity" - }, - { - "cbor": "+3/wAAAAAAAA", - "hex": "fb7ff0000000000000", - "roundtrip": false, - "diagnostic": "Infinity" - }, - { - "cbor": "+3/4AAAAAAAA", - "hex": "fb7ff8000000000000", - "roundtrip": false, - "diagnostic": "NaN" - }, - { - "cbor": "+//wAAAAAAAA", - "hex": "fbfff0000000000000", - "roundtrip": false, - "diagnostic": "-Infinity" - }, - { - "cbor": "9A==", - "hex": "f4", - "roundtrip": true, - "decoded": false - }, - { - "cbor": "9Q==", - "hex": "f5", - "roundtrip": true, - "decoded": true - }, - { - "cbor": "9g==", - "hex": "f6", - "roundtrip": true, - "decoded": null - }, - { - "cbor": "9w==", - "hex": "f7", - "roundtrip": true, - "diagnostic": "undefined" - }, - { - "cbor": "8A==", - "hex": "f0", - "roundtrip": true, - "diagnostic": "simple(16)" - }, - { - "cbor": "+Bg=", - "hex": "f818", - "roundtrip": true, - "diagnostic": "simple(24)" - }, - { - "cbor": "+P8=", - "hex": "f8ff", - "roundtrip": true, - "diagnostic": "simple(255)" - }, - { - "cbor": "wHQyMDEzLTAzLTIxVDIwOjA0OjAwWg==", - "hex": "c074323031332d30332d32315432303a30343a30305a", - "roundtrip": true, - "diagnostic": "0(\"2013-03-21T20:04:00Z\")" - }, - { - "cbor": 
"wRpRS2ew", - "hex": "c11a514b67b0", - "roundtrip": true, - "diagnostic": "1(1363896240)" - }, - { - "cbor": "wftB1FLZ7CAAAA==", - "hex": "c1fb41d452d9ec200000", - "roundtrip": true, - "diagnostic": "1(1363896240.5)" - }, - { - "cbor": "10QBAgME", - "hex": "d74401020304", - "roundtrip": true, - "diagnostic": "23(h'01020304')" - }, - { - "cbor": "2BhFZElFVEY=", - "hex": "d818456449455446", - "roundtrip": true, - "diagnostic": "24(h'6449455446')" - }, - { - "cbor": "2CB2aHR0cDovL3d3dy5leGFtcGxlLmNvbQ==", - "hex": "d82076687474703a2f2f7777772e6578616d706c652e636f6d", - "roundtrip": true, - "diagnostic": "32(\"http://www.example.com\")" - }, - { - "cbor": "QA==", - "hex": "40", - "roundtrip": true, - "diagnostic": "h''" - }, - { - "cbor": "RAECAwQ=", - "hex": "4401020304", - "roundtrip": true, - "diagnostic": "h'01020304'" - }, - { - "cbor": "YA==", - "hex": "60", - "roundtrip": true, - "decoded": "" - }, - { - "cbor": "YWE=", - "hex": "6161", - "roundtrip": true, - "decoded": "a" - }, - { - "cbor": "ZElFVEY=", - "hex": "6449455446", - "roundtrip": true, - "decoded": "IETF" - }, - { - "cbor": "YiJc", - "hex": "62225c", - "roundtrip": true, - "decoded": "\"\\" - }, - { - "cbor": "YsO8", - "hex": "62c3bc", - "roundtrip": true, - "decoded": "ΓΌ" - }, - { - "cbor": "Y+awtA==", - "hex": "63e6b0b4", - "roundtrip": true, - "decoded": "ζ°΄" - }, - { - "cbor": "ZPCQhZE=", - "hex": "64f0908591", - "roundtrip": true, - "decoded": "𐅑" - }, - { - "cbor": "gA==", - "hex": "80", - "roundtrip": true, - "decoded": [ - - ] - }, - { - "cbor": "gwECAw==", - "hex": "83010203", - "roundtrip": true, - "decoded": [ - 1, - 2, - 3 - ] - }, - { - "cbor": "gwGCAgOCBAU=", - "hex": "8301820203820405", - "roundtrip": true, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "mBkBAgMEBQYHCAkKCwwNDg8QERITFBUWFxgYGBk=", - "hex": "98190102030405060708090a0b0c0d0e0f101112131415161718181819", - "roundtrip": true, - "decoded": [ - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25 - ] - }, - { - "cbor": "oA==", - "hex": "a0", - "roundtrip": true, - "decoded": { - } - }, - { - "cbor": "ogECAwQ=", - "hex": "a201020304", - "roundtrip": true, - "skip": true, - "diagnostic": "{1: 2, 3: 4}" - }, - { - "cbor": "omFhAWFiggID", - "hex": "a26161016162820203", - "roundtrip": true, - "decoded": { - "a": 1, - "b": [ - 2, - 3 - ] - } - }, - { - "cbor": "gmFhoWFiYWM=", - "hex": "826161a161626163", - "roundtrip": true, - "decoded": [ - "a", - { - "b": "c" - } - ] - }, - { - "cbor": "pWFhYUFhYmFCYWNhQ2FkYURhZWFF", - "hex": "a56161614161626142616361436164614461656145", - "roundtrip": true, - "decoded": { - "a": "A", - "b": "B", - "c": "C", - "d": "D", - "e": "E" - } - }, - { - "cbor": "X0IBAkMDBAX/", - "hex": "5f42010243030405ff", - "roundtrip": false, - "skip": true, - "diagnostic": "(_ h'0102', h'030405')" - }, - { - "cbor": "f2VzdHJlYWRtaW5n/w==", - "hex": "7f657374726561646d696e67ff", - "roundtrip": false, - "decoded": "streaming" - }, - { - "cbor": "n/8=", - "hex": "9fff", - "roundtrip": false, - "decoded": [ - - ] - }, - { - "cbor": "nwGCAgOfBAX//w==", - "hex": "9f018202039f0405ffff", - "roundtrip": false, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "nwGCAgOCBAX/", - "hex": "9f01820203820405ff", - "roundtrip": false, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "gwGCAgOfBAX/", - "hex": "83018202039f0405ff", - "roundtrip": false, - "decoded": [ - 1, - [ - 2, - 3 - ], 
- [ - 4, - 5 - ] - ] - }, - { - "cbor": "gwGfAgP/ggQF", - "hex": "83019f0203ff820405", - "roundtrip": false, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "nwECAwQFBgcICQoLDA0ODxAREhMUFRYXGBgYGf8=", - "hex": "9f0102030405060708090a0b0c0d0e0f101112131415161718181819ff", - "roundtrip": false, - "decoded": [ - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25 - ] - }, - { - "cbor": "v2FhAWFinwID//8=", - "hex": "bf61610161629f0203ffff", - "roundtrip": false, - "decoded": { - "a": 1, - "b": [ - 2, - 3 - ] - } - }, - { - "cbor": "gmFhv2FiYWP/", - "hex": "826161bf61626163ff", - "roundtrip": false, - "decoded": [ - "a", - { - "b": "c" - } - ] - }, - { - "cbor": "v2NGdW71Y0FtdCH/", - "hex": "bf6346756ef563416d7421ff", - "roundtrip": false, - "decoded": { - "Fun": true, - "Amt": -2 - } - } -] diff --git a/src/vendor/github.com/hashicorp/go-msgpack/codec/time.go b/src/vendor/github.com/hashicorp/go-msgpack/codec/time.go new file mode 100644 index 00000000..c86d6532 --- /dev/null +++ b/src/vendor/github.com/hashicorp/go-msgpack/codec/time.go @@ -0,0 +1,193 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "time" +) + +var ( + timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} +) + +// EncodeTime encodes a time.Time as a []byte, including +// information on the instant in time and UTC offset. +// +// Format Description +// +// A timestamp is composed of 3 components: +// +// - secs: signed integer representing seconds since unix epoch +// - nsces: unsigned integer representing fractional seconds as a +// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 +// - tz: signed integer representing timezone offset in minutes east of UTC, +// and a dst (daylight savings time) flag +// +// When encoding a timestamp, the first byte is the descriptor, which +// defines which components are encoded and how many bytes are used to +// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it +// is not encoded in the byte array explicitly*. +// +// Descriptor 8 bits are of the form `A B C DDD EE`: +// A: Is secs component encoded? 1 = true +// B: Is nsecs component encoded? 1 = true +// C: Is tz component encoded? 1 = true +// DDD: Number of extra bytes for secs (range 0-7). +// If A = 1, secs encoded in DDD+1 bytes. +// If A = 0, secs is not encoded, and is assumed to be 0. +// If A = 1, then we need at least 1 byte to encode secs. +// DDD says the number of extra bytes beyond that 1. +// E.g. if DDD=0, then secs is represented in 1 byte. +// if DDD=2, then secs is represented in 3 bytes. +// EE: Number of extra bytes for nsecs (range 0-3). +// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) +// +// Following the descriptor bytes, subsequent bytes are: +// +// secs component encoded in `DDD + 1` bytes (if A == 1) +// nsecs component encoded in `EE + 1` bytes (if B == 1) +// tz component encoded in 2 bytes (if C == 1) +// +// secs and nsecs components are integers encoded in a BigEndian +// 2-complement encoding format. +// +// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to +// Least significant bit 0 are described below: +// +// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). +// Bit 15 = have\_dst: set to 1 if we set the dst flag. 
+// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not. +// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format. +// +func encodeTime(t time.Time) []byte { + //t := rv.Interface().(time.Time) + tsecs, tnsecs := t.Unix(), t.Nanosecond() + var ( + bd byte + btmp [8]byte + bs [16]byte + i int = 1 + ) + l := t.Location() + if l == time.UTC { + l = nil + } + if tsecs != 0 { + bd = bd | 0x80 + bigen.PutUint64(btmp[:], uint64(tsecs)) + f := pruneSignExt(btmp[:], tsecs >= 0) + bd = bd | (byte(7-f) << 2) + copy(bs[i:], btmp[f:]) + i = i + (8 - f) + } + if tnsecs != 0 { + bd = bd | 0x40 + bigen.PutUint32(btmp[:4], uint32(tnsecs)) + f := pruneSignExt(btmp[:4], true) + bd = bd | byte(3-f) + copy(bs[i:], btmp[f:4]) + i = i + (4 - f) + } + if l != nil { + bd = bd | 0x20 + // Note that Go Libs do not give access to dst flag. + _, zoneOffset := t.Zone() + //zoneName, zoneOffset := t.Zone() + zoneOffset /= 60 + z := uint16(zoneOffset) + bigen.PutUint16(btmp[:2], z) + // clear dst flags + bs[i] = btmp[0] & 0x3f + bs[i+1] = btmp[1] + i = i + 2 + } + bs[0] = bd + return bs[0:i] +} + +// DecodeTime decodes a []byte into a time.Time. +func decodeTime(bs []byte) (tt time.Time, err error) { + bd := bs[0] + var ( + tsec int64 + tnsec uint32 + tz uint16 + i byte = 1 + i2 byte + n byte + ) + if bd&(1<<7) != 0 { + var btmp [8]byte + n = ((bd >> 2) & 0x7) + 1 + i2 = i + n + copy(btmp[8-n:], bs[i:i2]) + //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) + if bs[i]&(1<<7) != 0 { + copy(btmp[0:8-n], bsAll0xff) + //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } + } + i = i2 + tsec = int64(bigen.Uint64(btmp[:])) + } + if bd&(1<<6) != 0 { + var btmp [4]byte + n = (bd & 0x3) + 1 + i2 = i + n + copy(btmp[4-n:], bs[i:i2]) + i = i2 + tnsec = bigen.Uint32(btmp[:]) + } + if bd&(1<<5) == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + return + } + // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. + // However, we need name here, so it can be shown when time is printed. + // Zone name is in form: UTC-08:00. + // Note that Go Libs do not give access to dst flag, so we ignore dst bits + + i2 = i + 2 + tz = bigen.Uint16(bs[i:i2]) + i = i2 + // sign extend sign bit into top 2 MSB (which were dst bits): + if tz&(1<<13) == 0 { // positive + tz = tz & 0x3fff //clear 2 MSBs: dst bits + } else { // negative + tz = tz | 0xc000 //set 2 MSBs: dst bits + //tzname[3] = '-' (TODO: verify. this works here) + } + tzint := int16(tz) + if tzint == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + } else { + // For Go Time, do not use a descriptive timezone. + // It's unnecessary, and makes it harder to do a reflect.DeepEqual. + // The Offset already tells what the offset should be, if not on UTC and unknown zone name. + // var zoneName = timeLocUTCName(tzint) + tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) + } + return +} + +func timeLocUTCName(tzint int16) string { + if tzint == 0 { + return "UTC" + } + var tzname = []byte("UTC+00:00") + //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. + //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first + var tzhr, tzmin int16 + if tzint < 0 { + tzname[3] = '-' // (TODO: verify. 
this works here) + tzhr, tzmin = -tzint/60, (-tzint)%60 + } else { + tzhr, tzmin = tzint/60, tzint%60 + } + tzname[4] = timeDigits[tzhr/10] + tzname[5] = timeDigits[tzhr%10] + tzname[7] = timeDigits[tzmin/10] + tzname[8] = timeDigits[tzmin%10] + return string(tzname) + //return time.FixedZone(string(tzname), int(tzint)*60) +} diff --git a/src/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/src/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go index a86c8539..5673773b 100644 --- a/src/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ b/src/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -73,9 +73,6 @@ func (c *LRU) Add(key, value interface{}) (evicted bool) { func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) - if ent.Value.(*entry) == nil { - return nil, false - } return ent.Value.(*entry).value, true } return @@ -145,19 +142,6 @@ func (c *LRU) Len() int { return c.evictList.Len() } -// Resize changes the cache size. -func (c *LRU) Resize(size int) (evicted int) { - diff := c.Len() - size - if diff < 0 { - diff = 0 - } - for i := 0; i < diff; i++ { - c.removeOldest() - } - c.size = size - return diff -} - // removeOldest removes the oldest item from the cache. func (c *LRU) removeOldest() { ent := c.evictList.Back() diff --git a/src/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/src/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go index 92d70934..74c70774 100644 --- a/src/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go +++ b/src/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -10,7 +10,7 @@ type LRUCache interface { // updates the "recently used"-ness of the key. #value, isFound Get(key interface{}) (value interface{}, ok bool) - // Checks if a key exists in cache without updating the recent-ness. + // Check if a key exsists in cache without updating the recent-ness. Contains(key interface{}) (ok bool) // Returns key's value without updating the "recently used"-ness of the key. @@ -31,9 +31,6 @@ type LRUCache interface { // Returns the number of items in the cache. Len() int - // Clears all cache entries. + // Clear all cache entries Purge() - - // Resizes cache, returning number evicted - Resize(int) int } diff --git a/src/vendor/github.com/nats-io/nats.go/.travis.yml b/src/vendor/github.com/nats-io/nats.go/.travis.yml index aa87e139..1d05d3f8 100644 --- a/src/vendor/github.com/nats-io/nats.go/.travis.yml +++ b/src/vendor/github.com/nats-io/nats.go/.travis.yml @@ -7,7 +7,7 @@ install: - go get -t ./... - go get github.com/mattn/goveralls - go get github.com/wadey/gocovmerge -- go get -u honnef.co/go/tools/cmd/staticcheck +- go install honnef.co/go/tools/cmd/staticcheck@v0.2.2 - go get -u github.com/client9/misspell/cmd/misspell before_script: - $(exit $(go fmt ./... | wc -l)) diff --git a/src/vendor/github.com/nats-io/nats.go/.words b/src/vendor/github.com/nats-io/nats.go/.words index 63fba00c..24be7f62 100644 --- a/src/vendor/github.com/nats-io/nats.go/.words +++ b/src/vendor/github.com/nats-io/nats.go/.words @@ -1,26 +1,9 @@ 1 -## ^^^ size estimate, just needs to be non-zero -## -## The .words file is used by gospel (v1.2+), which wraps the Hunspell libraries but populates the dictionary with identifiers from the Go source. -## -## -## Comment lines are not actually parsed as comments, try to be careful ... but in practice they seem to work? 
-## -## We assume en_US hunspell dictionaries are installed and used. The /AFFIXRULES are defined in en_US.aff (eg: /usr/share/hunspell/en_US.aff) -## -## words which are in the base dictionary can't have extra affix rules added to them, so we have to start with the affixed variant we want to add. -## thus creds rather than cred/S and so on -## So we can't use receive/DRSZGBU, adding 'U', to allow unreceive and variants, we have to use unreceive as the stem. -## We can't define our own affix or compound rules, to capture rfc\d{3,} or 0x[0-9A-Fa-f]{2} - -## People involved who are referenced in todo/fixmes + derek dlc ivan -## Legitimate spellings in non-US English dialects; -## regular-if-rarer words just missing from the dictionary; -## variants of words not covered by hunspell en_US rules. acknowledgement/SM arity deduplication/S @@ -31,31 +14,12 @@ observable/S redelivery/S retransmitting retry/SB -unmarshal/SDG - -# I think that retry, being added as a symbol, is precluding the re-addition here with affix rules, -# so "retry/SB" above is ignored -retries -retryable - -## Things gospel doesn't pick up, but doesn't yet; I've filed -## Eg, plurals of non-collection types, or wire-format encodings in a struct field's tag -AsyncSubscriptions -ChanSubscriptions -PubAckFutures -SubOpts -SyncSubscriptions -no_wait - -## Conceptual nouns not actually in the source, describing state + SlowConsumer -## Symbols from elsewhere referred to in comments but not referenced in the code, so not currently surfaced by gospel as acceptable AppendInt ReadMIMEHeader -## The rest - clientProtoZero jetstream v1 @@ -83,8 +47,6 @@ tm todo unsub/S -## The spelling tokenizer doesn't take "permessage-deflate" as allowing for ... "permessage-deflate", -## which is an RFC7692 registered extension. We have to explicitly list "permessage". permessage permessage-deflate urlA diff --git a/src/vendor/github.com/nats-io/nats.go/.words.readme b/src/vendor/github.com/nats-io/nats.go/.words.readme new file mode 100644 index 00000000..9d9f5cbb --- /dev/null +++ b/src/vendor/github.com/nats-io/nats.go/.words.readme @@ -0,0 +1,25 @@ +The .words file is used by gospel (v1.2+), which wraps the Hunspell libraries +but populates the dictionary with identifiers from the Go source. + + + +Alas, no comments are allowed in the .words file and newer versions of gospel +error out on seeing them. This is really a hunspell restriction. + +We assume en_US hunspell dictionaries are installed and used. +The /AFFIXRULES are defined in en_US.aff (eg: /usr/share/hunspell/en_US.aff) +Invoke `hunspell -D` to see the actual locations. + +Words which are in the base dictionary can't have extra affix rules added to +them, so we have to start with the affixed variant we want to add. +Thus `creds` rather than `cred/S` and so on. + +So we can't use receive/DRSZGBU, adding 'U', to allow unreceive and variants, +we have to use unreceive as the stem. + +We can't define our own affix or compound rules, +to capture rfc\d{3,} or 0x[0-9A-Fa-f]{2} + +The spelling tokenizer doesn't take "permessage-deflate" as allowing for ... +"permessage-deflate", which is an RFC7692 registered extension for websockets. +We have to explicitly list "permessage". 
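Editor's aside: the timestamp descriptor byte documented in the new codec/time.go above (layout `A B C DDD EE`) is easier to follow with a worked example. The helper below is purely illustrative and is not part of the patch or of go-msgpack; it just interprets a descriptor byte the same way decodeTime does.

```go
// Illustrative only: decodes the descriptor byte documented in codec/time.go
// (bit layout A B C DDD EE) and reports which timestamp components follow
// and how many bytes each occupies.
package main

import "fmt"

func describeTimeDescriptor(bd byte) {
	hasSecs := bd&0x80 != 0  // A: secs component present
	hasNsecs := bd&0x40 != 0 // B: nsecs component present
	hasTZ := bd&0x20 != 0    // C: tz component present

	fmt.Printf("descriptor 0x%02x:\n", bd)
	if hasSecs {
		// DDD (bits 4..2) counts extra bytes beyond the first one.
		fmt.Printf("  secs encoded in %d byte(s)\n", ((bd>>2)&0x7)+1)
	}
	if hasNsecs {
		// EE (bits 1..0) counts extra bytes beyond the first one.
		fmt.Printf("  nsecs encoded in %d byte(s)\n", (bd&0x3)+1)
	}
	if hasTZ {
		fmt.Println("  tz offset encoded in 2 bytes")
	}
}

func main() {
	// 0x8C = A=1, DDD=011: a 2022-era UTC timestamp with zero nanoseconds,
	// whose seconds value fits in 4 big-endian bytes after sign pruning.
	describeTimeDescriptor(0x8C)
}
```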
diff --git a/src/vendor/github.com/nats-io/nats.go/README.md b/src/vendor/github.com/nats-io/nats.go/README.md index adaf600a..766ff2df 100644 --- a/src/vendor/github.com/nats-io/nats.go/README.md +++ b/src/vendor/github.com/nats-io/nats.go/README.md @@ -29,7 +29,7 @@ When using or transitioning to Go modules support: ```bash # Go client latest or explicit version go get github.com/nats-io/nats.go/@latest -go get github.com/nats-io/nats.go/@v1.13.0 +go get github.com/nats-io/nats.go/@v1.16.0 # For latest NATS Server, add /v2 at the end go get github.com/nats-io/nats-server/v2 diff --git a/src/vendor/github.com/nats-io/nats.go/context.go b/src/vendor/github.com/nats-io/nats.go/context.go index 037668fb..300b6ebb 100644 --- a/src/vendor/github.com/nats-io/nats.go/context.go +++ b/src/vendor/github.com/nats-io/nats.go/context.go @@ -21,20 +21,13 @@ import ( // RequestMsgWithContext takes a context, a subject and payload // in bytes and request expecting a single response. func (nc *Conn) RequestMsgWithContext(ctx context.Context, msg *Msg) (*Msg, error) { - var hdr []byte - var err error - - if len(msg.Header) > 0 { - if !nc.info.Headers { - return nil, ErrHeadersNotSupported - } - - hdr, err = msg.headerBytes() - if err != nil { - return nil, err - } + if msg == nil { + return nil, ErrInvalidMsg + } + hdr, err := msg.headerBytes() + if err != nil { + return nil, err } - return nc.requestWithContext(ctx, msg.Subject, hdr, msg.Data) } diff --git a/src/vendor/github.com/nats-io/nats.go/go_test.mod b/src/vendor/github.com/nats-io/nats.go/go_test.mod index a9a586e3..ec124c91 100644 --- a/src/vendor/github.com/nats-io/nats.go/go_test.mod +++ b/src/vendor/github.com/nats-io/nats.go/go_test.mod @@ -4,17 +4,17 @@ go 1.17 require ( github.com/golang/protobuf v1.4.2 - github.com/nats-io/nats-server/v2 v2.7.2 + github.com/nats-io/nats-server/v2 v2.8.4 github.com/nats-io/nkeys v0.3.0 github.com/nats-io/nuid v1.0.1 google.golang.org/protobuf v1.23.0 ) require ( - github.com/klauspost/compress v1.13.4 // indirect - github.com/minio/highwayhash v1.0.1 // indirect - github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296 // indirect - golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce // indirect + github.com/klauspost/compress v1.14.4 // indirect + github.com/minio/highwayhash v1.0.2 // indirect + github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a // indirect + golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect golang.org/x/sys v0.0.0-20220111092808-5a964db01320 // indirect golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect ) diff --git a/src/vendor/github.com/nats-io/nats.go/go_test.sum b/src/vendor/github.com/nats-io/nats.go/go_test.sum index f7a98a56..9b1cf6df 100644 --- a/src/vendor/github.com/nats-io/nats.go/go_test.sum +++ b/src/vendor/github.com/nats-io/nats.go/go_test.sum @@ -5,28 +5,26 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/klauspost/compress v1.13.4 h1:0zhec2I8zGnjWcKyLl6i3gPqKANCCn5e9xmviEEeX6s= -github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0= -github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= -github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296 h1:vU9tpM3apjYlLLeY23zRWJ9Zktr5jp+mloR942LEOpY= -github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= -github.com/nats-io/nats-server/v2 v2.7.2 h1:+LEN8m0+jdCkiGc884WnDuxR+qj80/5arj+szKuRpRI= -github.com/nats-io/nats-server/v2 v2.7.2/go.mod h1:tckmrt0M6bVaDT3kmh9UrIq/CBOBBse+TpXQi5ldaa8= -github.com/nats-io/nats.go v1.13.1-0.20220121202836-972a071d373d/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/klauspost/compress v1.14.4 h1:eijASRJcobkVtSt81Olfh7JX43osYLwy5krOJo6YEu4= +github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a h1:lem6QCvxR0Y28gth9P+wV2K/zYUUAkJ+55U8cpS0p5I= +github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= +github.com/nats-io/nats-server/v2 v2.8.4 h1:0jQzze1T9mECg8YZEl8+WYUXb9JKluJfCBriPUtluB4= +github.com/nats-io/nats-server/v2 v2.8.4/go.mod h1:8zZa+Al3WsESfmgSs98Fi06dRWLH5Bnq90m5bKD/eT4= +github.com/nats-io/nats.go v1.15.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce h1:Roh6XWxHFKrPgC/EQhVubSAGQ6Ozk6IdxHSzt1mR0EI= -golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/src/vendor/github.com/nats-io/nats.go/js.go b/src/vendor/github.com/nats-io/nats.go/js.go index 9f36d909..dafa53ab 100644 --- a/src/vendor/github.com/nats-io/nats.go/js.go +++ b/src/vendor/github.com/nats-io/nats.go/js.go @@ -181,6 +181,15 @@ const ( // routine. Without this, the subscription would possibly stall until // a new message or heartbeat/fc are received. 
chanSubFCCheckInterval = 250 * time.Millisecond + + // Default time wait between retries on Publish iff err is NoResponders. + DefaultPubRetryWait = 250 * time.Millisecond + + // Default number of retries + DefaultPubRetryAttempts = 2 + + // defaultAsyncPubAckInflight is the number of async pub acks inflight. + defaultAsyncPubAckInflight = 4000 ) // Types of control messages, so far heartbeat and flow control @@ -212,12 +221,12 @@ type jsOpts struct { wait time.Duration // For async publish error handling. aecb MsgErrHandler - // Maximum in flight. - maxap int + // Max async pub ack in flight + maxpa int // the domain that produced the pre domain string // enables protocol tracing - trace TraceCB + ctrace ClientTrace shouldTrace bool } @@ -232,8 +241,9 @@ func (nc *Conn) JetStream(opts ...JSOpt) (JetStreamContext, error) { js := &js{ nc: nc, opts: &jsOpts{ - pre: defaultAPIPrefix, - wait: defaultRequestWait, + pre: defaultAPIPrefix, + wait: defaultRequestWait, + maxpa: defaultAsyncPubAckInflight, }, } @@ -257,26 +267,16 @@ func (opt jsOptFn) configureJSContext(opts *jsOpts) error { return opt(opts) } -// TraceOperation indicates the direction of traffic flow to TraceCB -type TraceOperation int - -const ( - // TraceSent indicate the payload is being sent to subj - TraceSent TraceOperation = 0 - // TraceReceived indicate the payload is being received on subj - TraceReceived TraceOperation = 1 -) - -// TraceCB is called to trace API interactions for the JetStream Context -type TraceCB func(op TraceOperation, subj string, payload []byte, hdr Header) +// ClientTrace can be used to trace API interactions for the JetStream Context. +type ClientTrace struct { + RequestSent func(subj string, payload []byte) + ResponseReceived func(subj string, payload []byte, hdr Header) +} -// TraceFunc enables tracing of JetStream API interactions -func TraceFunc(cb TraceCB) JSOpt { - return jsOptFn(func(js *jsOpts) error { - js.trace = cb - js.shouldTrace = true - return nil - }) +func (ct ClientTrace) configureJSContext(js *jsOpts) error { + js.ctrace = ct + js.shouldTrace = true + return nil } // Domain changes the domain part of JetStream API prefix. @@ -336,10 +336,17 @@ type pubOpts struct { ctx context.Context ttl time.Duration id string - lid string // Expected last msgId - str string // Expected stream name - seq uint64 // Expected last sequence - lss uint64 // Expected last sequence per subject + lid string // Expected last msgId + str string // Expected stream name + seq *uint64 // Expected last sequence + lss *uint64 // Expected last sequence per subject + + // Publish retries for NoResponders err. + rwait time.Duration // Retry wait between attempts + rnum int // Retry attempts + + // stallWait is the max wait of a async pub ack. + stallWait time.Duration } // pubAckResponse is the ack response from the JetStream API when publishing a message. @@ -366,6 +373,13 @@ const ( MsgRollup = "Nats-Rollup" ) +// Headers for republished messages. +const ( + JSStream = "Nats-Stream" + JSSequence = "Nats-Sequence" + JSLastSequence = "Nats-Last-Sequence" +) + // MsgSize is a header that will be part of a consumer's delivered message if HeadersOnly requested. const MsgSize = "Nats-Msg-Size" @@ -377,7 +391,7 @@ const ( // PublishMsg publishes a Msg to a stream from JetStream. 
func (js *js) PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error) { - var o pubOpts + var o = pubOpts{rwait: DefaultPubRetryWait, rnum: DefaultPubRetryAttempts} if len(opts) > 0 { if m.Header == nil { m.Header = Header{} @@ -395,6 +409,9 @@ func (js *js) PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error) { if o.ttl == 0 && o.ctx == nil { o.ttl = js.opts.wait } + if o.stallWait > 0 { + return nil, fmt.Errorf("nats: stall wait cannot be set to sync publish") + } if o.id != _EMPTY_ { m.Header.Set(MsgIdHdr, o.id) @@ -405,11 +422,11 @@ func (js *js) PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error) { if o.str != _EMPTY_ { m.Header.Set(ExpectedStreamHdr, o.str) } - if o.seq > 0 { - m.Header.Set(ExpectedLastSeqHdr, strconv.FormatUint(o.seq, 10)) + if o.seq != nil { + m.Header.Set(ExpectedLastSeqHdr, strconv.FormatUint(*o.seq, 10)) } - if o.lss > 0 { - m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(o.lss, 10)) + if o.lss != nil { + m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(*o.lss, 10)) } var resp *Msg @@ -422,11 +439,35 @@ func (js *js) PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error) { } if err != nil { - if err == ErrNoResponders { - err = ErrNoStreamResponse + for r, ttl := 0, o.ttl; err == ErrNoResponders && (r < o.rnum || o.rnum < 0); r++ { + // To protect against small blips in leadership changes etc, if we get a no responders here retry. + if o.ctx != nil { + select { + case <-o.ctx.Done(): + case <-time.After(o.rwait): + } + } else { + time.Sleep(o.rwait) + } + if o.ttl > 0 { + ttl -= o.rwait + if ttl <= 0 { + err = ErrTimeout + break + } + resp, err = js.nc.RequestMsg(m, time.Duration(ttl)) + } else { + resp, err = js.nc.RequestMsgWithContext(o.ctx, m) + } + } + if err != nil { + if err == ErrNoResponders { + err = ErrNoStreamResponse + } + return nil, err } - return nil, err } + var pa pubAckResponse if err := json.Unmarshal(resp.Data, &pa); err != nil { return nil, ErrInvalidJSAck @@ -546,9 +587,9 @@ func (js *js) registerPAF(id string, paf *pubAckFuture) (int, int) { paf.js = js js.pafs[id] = paf np := len(js.pafs) - maxap := js.opts.maxap + maxpa := js.opts.maxpa js.mu.Unlock() - return np, maxap + return np, maxpa } // Lock should be held. @@ -600,7 +641,7 @@ func (js *js) handleAsyncReply(m *Msg) { delete(js.pafs, id) // Check on anyone stalled and waiting. - if js.stc != nil && len(js.pafs) < js.opts.maxap { + if js.stc != nil && len(js.pafs) < js.opts.maxpa { close(js.stc) js.stc = nil } @@ -671,7 +712,7 @@ func PublishAsyncMaxPending(max int) JSOpt { if max < 1 { return errors.New("nats: max ack pending should be >= 1") } - js.maxap = max + js.maxpa = max return nil }) } @@ -681,6 +722,8 @@ func (js *js) PublishAsync(subj string, data []byte, opts ...PubOpt) (PubAckFutu return js.PublishMsgAsync(&Msg{Subject: subj, Data: data}, opts...) } +const defaultStallWait = 200 * time.Millisecond + func (js *js) PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) { var o pubOpts if len(opts) > 0 { @@ -698,6 +741,10 @@ func (js *js) PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) { if o.ttl != 0 || o.ctx != nil { return nil, ErrContextAndTimeout } + stallWait := defaultStallWait + if o.stallWait > 0 { + stallWait = o.stallWait + } // FIXME(dlc) - Make common. 
if o.id != _EMPTY_ { @@ -709,11 +756,11 @@ func (js *js) PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) { if o.str != _EMPTY_ { m.Header.Set(ExpectedStreamHdr, o.str) } - if o.seq > 0 { - m.Header.Set(ExpectedLastSeqHdr, strconv.FormatUint(o.seq, 10)) + if o.seq != nil { + m.Header.Set(ExpectedLastSeqHdr, strconv.FormatUint(*o.seq, 10)) } - if o.lss > 0 { - m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(o.lss, 10)) + if o.lss != nil { + m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(*o.lss, 10)) } // Reply @@ -735,7 +782,7 @@ func (js *js) PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) { if maxPending > 0 && numPending >= maxPending { select { case <-js.asyncStall(): - case <-time.After(200 * time.Millisecond): + case <-time.After(stallWait): js.clearPAF(id) return nil, errors.New("nats: stalled with too many outstanding async published messages") } @@ -782,7 +829,7 @@ func ExpectStream(stream string) PubOpt { // ExpectLastSequence sets the expected sequence in the response from the publish. func ExpectLastSequence(seq uint64) PubOpt { return pubOptFn(func(opts *pubOpts) error { - opts.seq = seq + opts.seq = &seq return nil }) } @@ -790,7 +837,7 @@ func ExpectLastSequence(seq uint64) PubOpt { // ExpectLastSequencePerSubject sets the expected sequence per subject in the response from the publish. func ExpectLastSequencePerSubject(seq uint64) PubOpt { return pubOptFn(func(opts *pubOpts) error { - opts.lss = seq + opts.lss = &seq return nil }) } @@ -803,6 +850,33 @@ func ExpectLastMsgId(id string) PubOpt { }) } +// RetryWait sets the retry wait time when ErrNoResponders is encountered. +func RetryWait(dur time.Duration) PubOpt { + return pubOptFn(func(opts *pubOpts) error { + opts.rwait = dur + return nil + }) +} + +// RetryAttempts sets the retry number of attempts when ErrNoResponders is encountered. +func RetryAttempts(num int) PubOpt { + return pubOptFn(func(opts *pubOpts) error { + opts.rnum = num + return nil + }) +} + +// StallWait sets the max wait when the producer becomes stall producing messages. +func StallWait(ttl time.Duration) PubOpt { + return pubOptFn(func(opts *pubOpts) error { + if ttl <= 0 { + return fmt.Errorf("nats: stall wait should be more than 0") + } + opts.stallWait = ttl + return nil + }) +} + type ackOpts struct { ttl time.Duration ctx context.Context @@ -894,8 +968,6 @@ func (d nakDelay) configureAck(opts *ackOpts) error { type ConsumerConfig struct { Durable string `json:"durable_name,omitempty"` Description string `json:"description,omitempty"` - DeliverSubject string `json:"deliver_subject,omitempty"` - DeliverGroup string `json:"deliver_group,omitempty"` DeliverPolicy DeliverPolicy `json:"deliver_policy"` OptStartSeq uint64 `json:"opt_start_seq,omitempty"` OptStartTime *time.Time `json:"opt_start_time,omitempty"` @@ -917,8 +989,17 @@ type ConsumerConfig struct { MaxRequestBatch int `json:"max_batch,omitempty"` MaxRequestExpires time.Duration `json:"max_expires,omitempty"` + // Push based consumers. + DeliverSubject string `json:"deliver_subject,omitempty"` + DeliverGroup string `json:"deliver_group,omitempty"` + // Ephemeral inactivity threshold. InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"` + + // Generally inherited by parent stream and other markers, now can be configured directly. + Replicas int `json:"num_replicas"` + // Force memory storage. + MemoryStorage bool `json:"mem_storage,omitempty"` } // ConsumerInfo is the info from a JetStream consumer. 
@@ -1476,7 +1557,10 @@ func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync, } if js.opts.shouldTrace { - js.opts.trace(TraceSent, ccSubj, j, nil) + ctrace := js.opts.ctrace + if ctrace.RequestSent != nil { + ctrace.RequestSent(ccSubj, j) + } } resp, err := nc.Request(ccSubj, j, js.opts.wait) if err != nil { @@ -1487,7 +1571,10 @@ func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync, return nil, err } if js.opts.shouldTrace { - js.opts.trace(TraceReceived, ccSubj, resp.Data, resp.Header) + ctrace := js.opts.ctrace + if ctrace.ResponseReceived != nil { + ctrace.ResponseReceived(ccSubj, resp.Data, resp.Header) + } } var cinfo consumerResponse @@ -2186,6 +2273,14 @@ func RateLimit(n uint64) SubOpt { }) } +// BackOff is an array of time durations that represent the time to delay based on delivery count. +func BackOff(backOff []time.Duration) SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.BackOff = backOff + return nil + }) +} + // BindStream binds a consumer to a stream explicitly based on a name. // When a stream name is not specified, the library uses the subscribe // subject as a way to find the stream name. It is done by making a request @@ -2333,12 +2428,20 @@ func PullMaxWaiting(n int) SubOpt { }) } -var errNoMessages = errors.New("nats: no messages") +var ( + // errNoMessages is an error that a Fetch request using no_wait can receive to signal + // that there are no more messages available. + errNoMessages = errors.New("nats: no messages") + + // errRequestsPending is an error that represents a sub.Fetch requests that was using + // no_wait and expires time got discarded by the server. + errRequestsPending = errors.New("nats: requests pending") +) // Returns if the given message is a user message or not, and if // `checkSts` is true, returns appropriate error based on the // content of the status (404, etc..) -func checkMsg(msg *Msg, checkSts bool) (usrMsg bool, err error) { +func checkMsg(msg *Msg, checkSts, isNoWait bool) (usrMsg bool, err error) { // Assume user message usrMsg = true @@ -2367,11 +2470,17 @@ func checkMsg(msg *Msg, checkSts bool) (usrMsg bool, err error) { // 404 indicates that there are no messages. err = errNoMessages case reqTimeoutSts: - // Older servers may send a 408 when a request in the server was expired - // and interest is still found, which will be the case for our - // implementation. Regardless, ignore 408 errors until receiving at least - // one message. - err = ErrTimeout + // In case of a fetch request with no wait request and expires time, + // need to skip 408 errors and retry. + if isNoWait { + err = errRequestsPending + } else { + // Older servers may send a 408 when a request in the server was expired + // and interest is still found, which will be the case for our + // implementation. Regardless, ignore 408 errors until receiving at least + // one message when making requests without no_wait. + err = ErrTimeout + } default: err = fmt.Errorf("nats: %s", msg.Header.Get(descrHdr)) } @@ -2486,7 +2595,7 @@ func (sub *Subscription) Fetch(batch int, opts ...PullOpt) ([]*Msg, error) { // or status message, however, we don't care about values of status // messages at this point in the Fetch() call, so checkMsg can't // return an error. 
- if usrMsg, _ := checkMsg(msg, false); usrMsg { + if usrMsg, _ := checkMsg(msg, false, false); usrMsg { msgs = append(msgs, msg) } } @@ -2529,11 +2638,11 @@ func (sub *Subscription) Fetch(batch int, opts ...PullOpt) ([]*Msg, error) { if err == nil { var usrMsg bool - usrMsg, err = checkMsg(msg, true) + usrMsg, err = checkMsg(msg, true, noWait) if err == nil && usrMsg { msgs = append(msgs, msg) - } else if noWait && (err == errNoMessages) && len(msgs) == 0 { - // If we have a 404 for our "no_wait" request and have + } else if noWait && (err == errNoMessages || err == errRequestsPending) && len(msgs) == 0 { + // If we have a 404/408 for our "no_wait" request and have // not collected any message, then resend request to // wait this time. noWait = false @@ -2585,14 +2694,20 @@ func (js *js) getConsumerInfoContext(ctx context.Context, stream, consumer strin // a RequestWithContext with tracing via TraceCB func (js *js) apiRequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) { if js.opts.shouldTrace { - js.opts.trace(TraceSent, subj, data, nil) + ctrace := js.opts.ctrace + if ctrace.RequestSent != nil { + ctrace.RequestSent(subj, data) + } } resp, err := js.nc.RequestWithContext(ctx, subj, data) if err != nil { return nil, err } if js.opts.shouldTrace { - js.opts.trace(TraceReceived, subj, resp.Data, resp.Header) + ctrace := js.opts.ctrace + if ctrace.RequestSent != nil { + ctrace.ResponseReceived(subj, resp.Data, resp.Header) + } } return resp, nil diff --git a/src/vendor/github.com/nats-io/nats.go/jsm.go b/src/vendor/github.com/nats-io/nats.go/jsm.go index 87ab37ce..6bfff673 100644 --- a/src/vendor/github.com/nats-io/nats.go/jsm.go +++ b/src/vendor/github.com/nats-io/nats.go/jsm.go @@ -100,6 +100,15 @@ type StreamConfig struct { DenyDelete bool `json:"deny_delete,omitempty"` DenyPurge bool `json:"deny_purge,omitempty"` AllowRollup bool `json:"allow_rollup_hdrs,omitempty"` + + // Allow republish of the message after being sequenced and stored. + RePublish *SubjectMapping `json:"republish,omitempty"` +} + +// SubjectMapping allows a source subject to be mapped to a destination subject for republishing. +type SubjectMapping struct { + Source string `json:"src,omitempty"` + Destination string `json:"dest"` } // Placement is used to guide placement of streams in clustered JetStream. @@ -229,6 +238,9 @@ type consumerResponse struct { // AddConsumer will add a JetStream consumer. func (js *js) AddConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) { + if err := checkStreamName(stream); err != nil { + return nil, err + } o, cancel, err := getJSContextOpts(js.opts, opts...) 
if err != nil { return nil, err @@ -237,9 +249,6 @@ func (js *js) AddConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*C defer cancel() } - if stream == _EMPTY_ { - return nil, ErrStreamNameRequired - } req, err := json.Marshal(&createConsumerRequest{Stream: stream, Config: cfg}) if err != nil { return nil, err @@ -280,6 +289,9 @@ func (js *js) AddConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*C } func (js *js) UpdateConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) { + if err := checkStreamName(stream); err != nil { + return nil, err + } if cfg == nil { return nil, ErrConsumerConfigRequired } @@ -295,8 +307,34 @@ type consumerDeleteResponse struct { Success bool `json:"success,omitempty"` } +func checkStreamName(stream string) error { + if stream == _EMPTY_ { + return ErrStreamNameRequired + } + if strings.Contains(stream, ".") { + return ErrInvalidStreamName + } + return nil +} + +func checkConsumerName(consumer string) error { + if consumer == _EMPTY_ { + return ErrConsumerNameRequired + } + if strings.Contains(consumer, ".") { + return ErrInvalidConsumerName + } + return nil +} + // DeleteConsumer deletes a Consumer. func (js *js) DeleteConsumer(stream, consumer string, opts ...JSOpt) error { + if err := checkStreamName(stream); err != nil { + return err + } + if err := checkConsumerName(consumer); err != nil { + return err + } o, cancel, err := getJSContextOpts(js.opts, opts...) if err != nil { return err @@ -305,10 +343,6 @@ func (js *js) DeleteConsumer(stream, consumer string, opts ...JSOpt) error { defer cancel() } - if stream == _EMPTY_ { - return ErrStreamNameRequired - } - dcSubj := js.apiSubj(fmt.Sprintf(apiConsumerDeleteT, stream, consumer)) r, err := js.apiRequestWithContext(o.ctx, dcSubj, nil) if err != nil { @@ -330,6 +364,12 @@ func (js *js) DeleteConsumer(stream, consumer string, opts ...JSOpt) error { // ConsumerInfo returns information about a Consumer. func (js *js) ConsumerInfo(stream, consumer string, opts ...JSOpt) (*ConsumerInfo, error) { + if err := checkStreamName(stream); err != nil { + return nil, err + } + if err := checkConsumerName(consumer); err != nil { + return nil, err + } o, cancel, err := getJSContextOpts(js.opts, opts...) if err != nil { return nil, err @@ -369,8 +409,8 @@ func (c *consumerLister) Next() bool { if c.err != nil { return false } - if c.stream == _EMPTY_ { - c.err = ErrStreamNameRequired + if err := checkStreamName(c.stream); err != nil { + c.err = err return false } if c.pageInfo != nil && c.offset >= c.pageInfo.Total { @@ -474,8 +514,8 @@ func (c *consumerNamesLister) Next() bool { if c.err != nil { return false } - if c.stream == _EMPTY_ { - c.err = ErrStreamNameRequired + if err := checkStreamName(c.stream); err != nil { + c.err = err return false } if c.pageInfo != nil && c.offset >= c.pageInfo.Total { @@ -556,6 +596,12 @@ type streamCreateResponse struct { } func (js *js) AddStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) { + if cfg == nil { + return nil, ErrStreamConfigRequired + } + if err := checkStreamName(cfg.Name); err != nil { + return nil, err + } o, cancel, err := getJSContextOpts(js.opts, opts...) 
if err != nil { return nil, err @@ -564,14 +610,6 @@ func (js *js) AddStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) { defer cancel() } - if cfg == nil || cfg.Name == _EMPTY_ { - return nil, ErrStreamNameRequired - } - - if strings.Contains(cfg.Name, ".") { - return nil, ErrInvalidStreamName - } - req, err := json.Marshal(cfg) if err != nil { return nil, err @@ -587,6 +625,9 @@ func (js *js) AddStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) { return nil, err } if resp.Error != nil { + if resp.Error.ErrorCode == 10058 { + return nil, ErrStreamNameAlreadyInUse + } return nil, errors.New(resp.Error.Description) } @@ -596,10 +637,9 @@ func (js *js) AddStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) { type streamInfoResponse = streamCreateResponse func (js *js) StreamInfo(stream string, opts ...JSOpt) (*StreamInfo, error) { - if strings.Contains(stream, ".") { - return nil, ErrInvalidStreamName + if err := checkStreamName(stream); err != nil { + return nil, err } - o, cancel, err := getJSContextOpts(js.opts, opts...) if err != nil { return nil, err @@ -621,7 +661,7 @@ func (js *js) StreamInfo(stream string, opts ...JSOpt) (*StreamInfo, error) { if resp.Error.Code == 404 { return nil, ErrStreamNotFound } - return nil, errors.New(resp.Error.Description) + return nil, fmt.Errorf("nats: %s", resp.Error.Description) } return resp.StreamInfo, nil @@ -675,6 +715,12 @@ type PeerInfo struct { // UpdateStream updates a Stream. func (js *js) UpdateStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) { + if cfg == nil { + return nil, ErrStreamConfigRequired + } + if err := checkStreamName(cfg.Name); err != nil { + return nil, err + } o, cancel, err := getJSContextOpts(js.opts, opts...) if err != nil { return nil, err @@ -683,10 +729,6 @@ func (js *js) UpdateStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error defer cancel() } - if cfg == nil || cfg.Name == _EMPTY_ { - return nil, ErrStreamNameRequired - } - req, err := json.Marshal(cfg) if err != nil { return nil, err @@ -715,6 +757,9 @@ type streamDeleteResponse struct { // DeleteStream deletes a Stream. func (js *js) DeleteStream(name string, opts ...JSOpt) error { + if err := checkStreamName(name); err != nil { + return err + } o, cancel, err := getJSContextOpts(js.opts, opts...) if err != nil { return err @@ -723,10 +768,6 @@ func (js *js) DeleteStream(name string, opts ...JSOpt) error { defer cancel() } - if name == _EMPTY_ { - return ErrStreamNameRequired - } - dsSubj := js.apiSubj(fmt.Sprintf(apiStreamDeleteT, name)) r, err := js.apiRequestWithContext(o.ctx, dsSubj, nil) if err != nil { @@ -902,6 +943,9 @@ type streamPurgeResponse struct { // PurgeStream purges messages on a Stream. func (js *js) PurgeStream(stream string, opts ...JSOpt) error { + if err := checkStreamName(stream); err != nil { + return err + } return js.purgeStream(stream, nil) } diff --git a/src/vendor/github.com/nats-io/nats.go/kv.go b/src/vendor/github.com/nats-io/nats.go/kv.go index 0b75054d..d4171b55 100644 --- a/src/vendor/github.com/nats-io/nats.go/kv.go +++ b/src/vendor/github.com/nats-io/nats.go/kv.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "reflect" "regexp" "strconv" "strings" @@ -53,9 +54,9 @@ type KeyValue interface { // Update will update the value iff the latest revision matches. Update(key string, value []byte, last uint64) (revision uint64, err error) // Delete will place a delete marker and leave all revisions. 
- Delete(key string) error + Delete(key string, opts ...DeleteOpt) error // Purge will place a delete marker and remove all previous revisions. - Purge(key string) error + Purge(key string, opts ...DeleteOpt) error // Watch for any updates to keys that match the keys argument which could include wildcards. // Watch will send a nil entry when it has received all initial values. Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) @@ -93,6 +94,8 @@ type KeyValueStatus interface { // KeyWatcher is what is returned when doing a watch. type KeyWatcher interface { + // Context returns watcher context optionally provided by nats.Context option. + Context() context.Context // Updates returns a channel to read any updates to entries. Updates() <-chan KeyValueEntry // Stop will stop this watcher. @@ -177,6 +180,40 @@ func (ctx ContextOpt) configurePurge(opts *purgeOpts) error { return nil } +type DeleteOpt interface { + configureDelete(opts *deleteOpts) error +} + +type deleteOpts struct { + // Remove all previous revisions. + purge bool + + // Delete only if the latest revision matches. + revision uint64 +} + +type deleteOptFn func(opts *deleteOpts) error + +func (opt deleteOptFn) configureDelete(opts *deleteOpts) error { + return opt(opts) +} + +// LastRevision deletes if the latest revision matches. +func LastRevision(revision uint64) DeleteOpt { + return deleteOptFn(func(opts *deleteOpts) error { + opts.revision = revision + return nil + }) +} + +// purge removes all previous revisions. +func purge() DeleteOpt { + return deleteOptFn(func(opts *deleteOpts) error { + opts.purge = true + return nil + }) +} + // KeyValueConfig is for configuring a KeyValue store. type KeyValueConfig struct { Bucket string @@ -187,6 +224,7 @@ type KeyValueConfig struct { MaxBytes int64 Storage StorageType Replicas int + Placement *Placement } // Used to watch all keys. @@ -326,18 +364,41 @@ func (js *js) CreateKeyValue(cfg *KeyValueConfig) (KeyValue, error) { replicas = 1 } + // We will set explicitly some values so that we can do comparison + // if we get an "already in use" error and need to check if it is same. + maxBytes := cfg.MaxBytes + if maxBytes == 0 { + maxBytes = -1 + } + maxMsgSize := cfg.MaxValueSize + if maxMsgSize == 0 { + maxMsgSize = -1 + } + // When stream's MaxAge is not set, server uses 2 minutes as the default + // for the duplicate window. If MaxAge is set, and lower than 2 minutes, + // then the duplicate window will be set to that. If MaxAge is greater, + // we will cap the duplicate window to 2 minutes (to be consistent with + // previous behavior). + duplicateWindow := 2 * time.Minute + if cfg.TTL > 0 && cfg.TTL < duplicateWindow { + duplicateWindow = cfg.TTL + } scfg := &StreamConfig{ Name: fmt.Sprintf(kvBucketNameTmpl, cfg.Bucket), Description: cfg.Description, Subjects: []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)}, MaxMsgsPerSubject: history, - MaxBytes: cfg.MaxBytes, + MaxBytes: maxBytes, MaxAge: cfg.TTL, - MaxMsgSize: cfg.MaxValueSize, + MaxMsgSize: maxMsgSize, Storage: cfg.Storage, Replicas: replicas, + Placement: cfg.Placement, AllowRollup: true, DenyDelete: true, + Duplicates: duplicateWindow, + MaxMsgs: -1, + MaxConsumers: -1, } // If we are at server version 2.7.2 or above use DiscardNew. We can not use DiscardNew for 2.7.1 or below. 
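A usage sketch for the optional delete arguments and the new Placement field introduced above; the server URL, bucket name, key, and cluster are illustrative only:

package main

import (
	"log"
	"time"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}

	kv, err := js.CreateKeyValue(&nats.KeyValueConfig{
		Bucket:    "CONFIG",                         // illustrative bucket name
		TTL:       30 * time.Second,                 // per the capping logic above, also becomes the duplicate window
		Placement: &nats.Placement{Cluster: "east"}, // new Placement field forwarded to the backing stream
	})
	if err != nil {
		log.Fatal(err)
	}

	rev, err := kv.Put("feature.flag", []byte("on"))
	if err != nil {
		log.Fatal(err)
	}

	// Delete succeeds only while the key is still at revision rev.
	if err := kv.Delete("feature.flag", nats.LastRevision(rev)); err != nil {
		log.Fatal(err)
	}

	// Purge is Delete plus the internal purge() option: it writes a delete
	// marker and rolls up all earlier revisions.
	if err := kv.Purge("feature.flag"); err != nil {
		log.Fatal(err)
	}
}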
@@ -346,7 +407,24 @@ func (js *js) CreateKeyValue(cfg *KeyValueConfig) (KeyValue, error) { } if _, err := js.AddStream(scfg); err != nil { - return nil, err + // If we have a failure to add, it could be because we have + // a config change if the KV was created against a pre 2.7.2 + // and we are now moving to a v2.7.2+. If that is the case + // and the only difference is the discard policy, then update + // the stream. + if err == ErrStreamNameAlreadyInUse { + if si, _ := js.StreamInfo(scfg.Name); si != nil { + // To compare, make the server's stream info discard + // policy same than ours. + si.Config.Discard = scfg.Discard + if reflect.DeepEqual(&si.Config, scfg) { + _, err = js.UpdateStream(scfg) + } + } + } + if err != nil { + return nil, err + } } kv := &kvs{ @@ -547,16 +625,7 @@ func (kv *kvs) Update(key string, value []byte, revision uint64) (uint64, error) } // Delete will place a delete marker and leave all revisions. -func (kv *kvs) Delete(key string) error { - return kv.delete(key, false) -} - -// Purge will remove the key and all revisions. -func (kv *kvs) Purge(key string) error { - return kv.delete(key, true) -} - -func (kv *kvs) delete(key string, purge bool) error { +func (kv *kvs) Delete(key string, opts ...DeleteOpt) error { if !keyValid(key) { return ErrInvalidKey } @@ -571,16 +640,35 @@ func (kv *kvs) delete(key string, purge bool) error { // DEL op marker. For watch functionality. m := NewMsg(b.String()) - if purge { + var o deleteOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureDelete(&o); err != nil { + return err + } + } + } + + if o.purge { m.Header.Set(kvop, kvpurge) m.Header.Set(MsgRollup, MsgRollupSubject) } else { m.Header.Set(kvop, kvdel) } + + if o.revision != 0 { + m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(o.revision, 10)) + } + _, err := kv.js.PublishMsg(m) return err } +// Purge will remove the key and all revisions. +func (kv *kvs) Purge(key string, opts ...DeleteOpt) error { + return kv.Delete(key, append(opts, purge())...) +} + const kvDefaultPurgeDeletesMarkerThreshold = 30 * time.Minute // PurgeDeletes will remove all current delete markers. @@ -701,6 +789,15 @@ type watcher struct { initDone bool initPending uint64 received uint64 + ctx context.Context +} + +// Context returns the context for the watcher if set. +func (w *watcher) Context() context.Context { + if w == nil { + return nil + } + return w.ctx } // Updates returns the interior channel. @@ -743,7 +840,7 @@ func (kv *kvs) Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) { keys = b.String() // We will block below on placing items on the chan. That is by design. - w := &watcher{updates: make(chan KeyValueEntry, 256)} + w := &watcher{updates: make(chan KeyValueEntry, 256), ctx: o.ctx} update := func(m *Msg) { tokens, err := getMetadataFields(m.Reply) diff --git a/src/vendor/github.com/nats-io/nats.go/nats.go b/src/vendor/github.com/nats-io/nats.go/nats.go index 7f2804fa..90f301f1 100644 --- a/src/vendor/github.com/nats-io/nats.go/nats.go +++ b/src/vendor/github.com/nats-io/nats.go/nats.go @@ -48,7 +48,7 @@ import ( // Default Constants const ( - Version = "1.13.0" + Version = "1.16.0" DefaultURL = "nats://127.0.0.1:4222" DefaultPort = 4222 DefaultMaxReconnect = 60 @@ -83,6 +83,9 @@ const ( // ACCOUNT_AUTHENTICATION_EXPIRED_ERR is for when nats server account authorization has expired. 
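Continuing the KV sketch above with the new watcher Context accessor (same kv handle; the context and log imports are assumed):

ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()

w, err := kv.Watch("feature.>", nats.Context(ctx))
if err != nil {
	log.Fatal(err)
}
defer w.Stop()

_ = w.Context() // the new accessor returns ctx (nil if no nats.Context option was given)

for entry := range w.Updates() {
	if entry == nil {
		// A nil entry marks the end of the initial values.
		continue
	}
	log.Printf("%s @ rev %d = %s", entry.Key(), entry.Revision(), entry.Value())
}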
ACCOUNT_AUTHENTICATION_EXPIRED_ERR = "account authentication expired" + + // MAX_CONNECTIONS_ERR is for when nats server denies the connection due to server max_connections limit + MAX_CONNECTIONS_ERR = "maximum connections exceeded" ) // Errors @@ -142,11 +145,13 @@ var ( ErrNotJSMessage = errors.New("nats: not a jetstream message") ErrInvalidStreamName = errors.New("nats: invalid stream name") ErrInvalidDurableName = errors.New("nats: invalid durable name") + ErrInvalidConsumerName = errors.New("nats: invalid consumer name") ErrNoMatchingStream = errors.New("nats: no stream matches subject") ErrSubjectMismatch = errors.New("nats: subject does not match consumer") ErrContextAndTimeout = errors.New("nats: context and timeout can not both be set") ErrInvalidJSAck = errors.New("nats: invalid jetstream publish response") ErrMultiStreamUnsupported = errors.New("nats: multiple streams are not supported") + ErrStreamConfigRequired = errors.New("nats: stream configuration is required") ErrStreamNameRequired = errors.New("nats: stream name is required") ErrStreamNotFound = errors.New("nats: stream not found") ErrConsumerNotFound = errors.New("nats: consumer not found") @@ -160,6 +165,8 @@ var ( ErrMsgNotFound = errors.New("nats: message not found") ErrMsgAlreadyAckd = errors.New("nats: message was already acknowledged") ErrStreamInfoMaxSubjects = errors.New("nats: subject details would exceed maximum allowed") + ErrStreamNameAlreadyInUse = errors.New("nats: stream name already in use") + ErrMaxConnectionsExceeded = errors.New("nats: server maximum connections exceeded") ) func init() { @@ -460,6 +467,10 @@ type Options struct { // supports compression. If the server does too, then data will be compressed. Compression bool + // For websocket connections, adds a path to connections url. + // This is useful when connecting to NATS behind a proxy. + ProxyPath string + // InboxPrefix allows the default _INBOX prefix to be customized InboxPrefix string } @@ -1140,6 +1151,15 @@ func Compression(enabled bool) Option { } } +// ProxyPath is an option for websocket connections that adds a path to connections url. +// This is useful when connecting to NATS behind a proxy. +func ProxyPath(path string) Option { + return func(o *Options) error { + o.ProxyPath = path + return nil + } +} + // CustomInboxPrefix configures the request + reply inbox prefix func CustomInboxPrefix(p string) Option { return func(o *Options) error { @@ -1649,6 +1669,17 @@ func (w *natsWriter) doneWithPending() { w.pending = nil } +// Notify the reader that we are done with the connect, where "read" operations +// happen synchronously and under the connection lock. After this point, "read" +// will be happening from the read loop, without the connection lock. +// +// Note: this runs under the connection lock. 
+func (r *natsReader) doneWithConnect() { + if wsr, ok := r.r.(*websocketReader); ok { + wsr.doneWithConnect() + } +} + func (r *natsReader) Read() ([]byte, error) { if r.off >= 0 { off := r.off @@ -1803,6 +1834,21 @@ func (nc *Conn) ConnectedUrl() string { return nc.current.url.String() } +// ConnectedUrlRedacted reports the connected server's URL with passwords redacted +func (nc *Conn) ConnectedUrlRedacted() string { + if nc == nil { + return _EMPTY_ + } + + nc.mu.RLock() + defer nc.mu.RUnlock() + + if nc.status != CONNECTED { + return _EMPTY_ + } + return nc.current.url.Redacted() +} + // ConnectedAddr returns the connected server's IP func (nc *Conn) ConnectedAddr() string { if nc == nil { @@ -1962,6 +2008,10 @@ func (nc *Conn) processConnectInit() error { go nc.readLoop() go nc.flusher() + // Notify the reader that we are done with the connect handshake, where + // reads were done synchronously and under the connection lock. + nc.br.doneWithConnect() + return nil } @@ -3182,6 +3232,8 @@ func (nc *Conn) processErr(ie string) { // FIXME(dlc) - process Slow Consumer signals special. if e == STALE_CONNECTION { nc.processOpErr(ErrStaleConnection) + } else if e == MAX_CONNECTIONS_ERR { + nc.processOpErr(ErrMaxConnectionsExceeded) } else if strings.HasPrefix(e, PERMISSIONS_ERR) { nc.processPermissionsViolation(ne) } else if authErr := checkAuthError(e); authErr != nil { @@ -3352,21 +3404,10 @@ func (nc *Conn) PublishMsg(m *Msg) error { if m == nil { return ErrInvalidMsg } - - var hdr []byte - var err error - - if len(m.Header) > 0 { - if !nc.info.Headers { - return ErrHeadersNotSupported - } - - hdr, err = m.headerBytes() - if err != nil { - return err - } + hdr, err := m.headerBytes() + if err != nil { + return err } - return nc.publish(m.Subject, m.Reply, hdr, m.Data) } @@ -3392,6 +3433,12 @@ func (nc *Conn) publish(subj, reply string, hdr, data []byte) error { } nc.mu.Lock() + // Check if headers attempted to be sent to server that does not support them. + if len(hdr) > 0 && !nc.info.Headers { + nc.mu.Unlock() + return ErrHeadersNotSupported + } + if nc.isClosed() { nc.mu.Unlock() return ErrConnectionClosed @@ -3562,17 +3609,12 @@ func (nc *Conn) createNewRequestAndSend(subj string, hdr, data []byte) (chan *Ms // RequestMsg will send a request payload including optional headers and deliver // the response message, or an error, including a timeout if no message was received properly. func (nc *Conn) RequestMsg(msg *Msg, timeout time.Duration) (*Msg, error) { - var hdr []byte - var err error - - if len(msg.Header) > 0 { - if !nc.info.Headers { - return nil, ErrHeadersNotSupported - } - hdr, err = msg.headerBytes() - if err != nil { - return nil, err - } + if msg == nil { + return nil, ErrInvalidMsg + } + hdr, err := msg.headerBytes() + if err != nil { + return nil, err } return nc.request(msg.Subject, hdr, msg.Data, timeout) diff --git a/src/vendor/github.com/nats-io/nats.go/object.go b/src/vendor/github.com/nats-io/nats.go/object.go index 4de788bc..bc68ca4b 100644 --- a/src/vendor/github.com/nats-io/nats.go/object.go +++ b/src/vendor/github.com/nats-io/nats.go/object.go @@ -1,4 +1,4 @@ -// Copyright 2021 The NATS Authors +// Copyright 2021-2022 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
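A short sketch of the two client-facing additions above, ProxyPath and ConnectedUrlRedacted; the gateway host, credentials, and path are placeholders:

// Placeholders: gateway host, credentials, and path are examples only.
nc, err := nats.Connect("wss://app:s3cret@gateway.example.com:443",
	// ProxyPath is appended to the websocket handshake URL, so the dial
	// goes to wss://gateway.example.com:443/nats behind the proxy.
	nats.ProxyPath("nats"),
)
if err != nil {
	log.Fatal(err)
}
defer nc.Close()

// Safe to log: the password in the URL is masked.
log.Println("connected to", nc.ConnectedUrlRedacted())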
// You may obtain a copy of the License at @@ -136,8 +136,10 @@ type ObjectStoreConfig struct { Bucket string Description string TTL time.Duration + MaxBytes int64 Storage StorageType Replicas int + Placement *Placement } type ObjectStoreStatus interface { @@ -243,8 +245,10 @@ func (js *js) CreateObjectStore(cfg *ObjectStoreConfig) (ObjectStore, error) { Description: cfg.Description, Subjects: []string{chunks, meta}, MaxAge: cfg.TTL, + MaxBytes: cfg.MaxBytes, Storage: cfg.Storage, Replicas: cfg.Replicas, + Placement: cfg.Placement, Discard: DiscardNew, AllowRollup: true, } diff --git a/src/vendor/github.com/nats-io/nats.go/ws.go b/src/vendor/github.com/nats-io/nats.go/ws.go index 2ef3f7f4..a76e5822 100644 --- a/src/vendor/github.com/nats-io/nats.go/ws.go +++ b/src/vendor/github.com/nats-io/nats.go/ws.go @@ -81,6 +81,7 @@ type websocketReader struct { ib []byte ff bool fc bool + nl bool dc *wsDecompressor nc *Conn } @@ -180,6 +181,15 @@ func wsNewReader(r io.Reader) *websocketReader { return &websocketReader{r: r, ff: true} } +// From now on, reads will be from the readLoop and we will need to +// acquire the connection lock should we have to send/write a control +// message from handleControlFrame. +// +// Note: this runs under the connection lock. +func (r *websocketReader) doneWithConnect() { + r.nl = true +} + func (r *websocketReader) Read(p []byte) (int, error) { var err error var buf []byte @@ -402,12 +412,12 @@ func (r *websocketReader) handleControlFrame(frameType wsOpCode, buf []byte, pos } } } - r.nc.wsEnqueueCloseMsg(status, body) + r.nc.wsEnqueueCloseMsg(r.nl, status, body) // Return io.EOF so that readLoop will close the connection as client closed // after processing pending buffers. return pos, io.EOF case wsPingMessage: - r.nc.wsEnqueueControlMsg(wsPongMessage, payload) + r.nc.wsEnqueueControlMsg(r.nl, wsPongMessage, payload) case wsPongMessage: // Nothing to do.. } @@ -560,6 +570,15 @@ func (nc *Conn) wsInitHandshake(u *url.URL) error { scheme = "https" } ustr := fmt.Sprintf("%s://%s", scheme, u.Host) + + if nc.Opts.ProxyPath != "" { + proxyPath := nc.Opts.ProxyPath + if !strings.HasPrefix(proxyPath, "/") { + proxyPath = "/" + proxyPath + } + ustr += proxyPath + } + u, err = url.Parse(ustr) if err != nil { return err @@ -644,14 +663,16 @@ func (nc *Conn) wsClose() { nc.wsEnqueueCloseMsgLocked(wsCloseStatusNormalClosure, _EMPTY_) } -func (nc *Conn) wsEnqueueCloseMsg(status int, payload string) { +func (nc *Conn) wsEnqueueCloseMsg(needsLock bool, status int, payload string) { // In some low-level unit tests it will happen... if nc == nil { return } - nc.mu.Lock() + if needsLock { + nc.mu.Lock() + defer nc.mu.Unlock() + } nc.wsEnqueueCloseMsgLocked(status, payload) - nc.mu.Unlock() } func (nc *Conn) wsEnqueueCloseMsgLocked(status int, payload string) { @@ -675,25 +696,26 @@ func (nc *Conn) wsEnqueueCloseMsgLocked(status int, payload string) { nc.bw.flush() } -func (nc *Conn) wsEnqueueControlMsg(frameType wsOpCode, payload []byte) { +func (nc *Conn) wsEnqueueControlMsg(needsLock bool, frameType wsOpCode, payload []byte) { // In some low-level unit tests it will happen... 
if nc == nil { return } - fh, key := wsCreateFrameHeader(false, frameType, len(payload)) - nc.mu.Lock() + if needsLock { + nc.mu.Lock() + defer nc.mu.Unlock() + } wr, ok := nc.bw.w.(*websocketWriter) if !ok { - nc.mu.Unlock() return } + fh, key := wsCreateFrameHeader(false, frameType, len(payload)) wr.ctrlFrames = append(wr.ctrlFrames, fh) if len(payload) > 0 { wsMaskBuf(key, payload) wr.ctrlFrames = append(wr.ctrlFrames, payload) } nc.bw.flush() - nc.mu.Unlock() } func wsPMCExtensionSupport(header http.Header) (bool, bool) { diff --git a/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go b/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go new file mode 100644 index 00000000..f4c92913 --- /dev/null +++ b/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go @@ -0,0 +1,40 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package collectors provides implementations of prometheus.Collector to +// conveniently collect process and Go-related metrics. +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// NewBuildInfoCollector returns a collector collecting a single metric +// "go_build_info" with the constant value 1 and three labels "path", "version", +// and "checksum". Their label values contain the main module path, version, and +// checksum, respectively. The labels will only have meaningful values if the +// binary is built with Go module support and from source code retrieved from +// the source repository (rather than the local file system). This is usually +// accomplished by building from outside of GOPATH, specifying the full address +// of the main package, e.g. "GO111MODULE=on go run +// github.com/prometheus/client_golang/examples/random". If built without Go +// module support, all label values will be "unknown". If built with Go module +// support but using the source code from the local file system, the "path" will +// be set appropriately, but "checksum" will be empty and "version" will be +// "(devel)". +// +// This collector uses only the build information for the main module. See +// https://github.com/povilasv/prommod for an example of a collector for the +// module dependencies. +func NewBuildInfoCollector() prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewBuildInfoCollector() +} diff --git a/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go b/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go new file mode 100644 index 00000000..e09f149d --- /dev/null +++ b/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go @@ -0,0 +1,119 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
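The collectors package above is the supported entry point for these collectors going forward; a minimal exposition sketch (the listen address is arbitrary):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A dedicated registry, so only what is registered here is exported.
	reg := prometheus.NewRegistry()
	reg.MustRegister(
		collectors.NewBuildInfoCollector(), // go_build_info
		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
		collectors.NewGoCollector(),
	)

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":2112", nil))
}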
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import ( + "database/sql" + + "github.com/prometheus/client_golang/prometheus" +) + +type dbStatsCollector struct { + db *sql.DB + + maxOpenConnections *prometheus.Desc + + openConnections *prometheus.Desc + inUseConnections *prometheus.Desc + idleConnections *prometheus.Desc + + waitCount *prometheus.Desc + waitDuration *prometheus.Desc + maxIdleClosed *prometheus.Desc + maxIdleTimeClosed *prometheus.Desc + maxLifetimeClosed *prometheus.Desc +} + +// NewDBStatsCollector returns a collector that exports metrics about the given *sql.DB. +// See https://golang.org/pkg/database/sql/#DBStats for more information on stats. +func NewDBStatsCollector(db *sql.DB, dbName string) prometheus.Collector { + fqName := func(name string) string { + return "go_sql_" + name + } + return &dbStatsCollector{ + db: db, + maxOpenConnections: prometheus.NewDesc( + fqName("max_open_connections"), + "Maximum number of open connections to the database.", + nil, prometheus.Labels{"db_name": dbName}, + ), + openConnections: prometheus.NewDesc( + fqName("open_connections"), + "The number of established connections both in use and idle.", + nil, prometheus.Labels{"db_name": dbName}, + ), + inUseConnections: prometheus.NewDesc( + fqName("in_use_connections"), + "The number of connections currently in use.", + nil, prometheus.Labels{"db_name": dbName}, + ), + idleConnections: prometheus.NewDesc( + fqName("idle_connections"), + "The number of idle connections.", + nil, prometheus.Labels{"db_name": dbName}, + ), + waitCount: prometheus.NewDesc( + fqName("wait_count_total"), + "The total number of connections waited for.", + nil, prometheus.Labels{"db_name": dbName}, + ), + waitDuration: prometheus.NewDesc( + fqName("wait_duration_seconds_total"), + "The total time blocked waiting for a new connection.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxIdleClosed: prometheus.NewDesc( + fqName("max_idle_closed_total"), + "The total number of connections closed due to SetMaxIdleConns.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxIdleTimeClosed: prometheus.NewDesc( + fqName("max_idle_time_closed_total"), + "The total number of connections closed due to SetConnMaxIdleTime.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxLifetimeClosed: prometheus.NewDesc( + fqName("max_lifetime_closed_total"), + "The total number of connections closed due to SetConnMaxLifetime.", + nil, prometheus.Labels{"db_name": dbName}, + ), + } +} + +// Describe implements Collector. +func (c *dbStatsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.maxOpenConnections + ch <- c.openConnections + ch <- c.inUseConnections + ch <- c.idleConnections + ch <- c.waitCount + ch <- c.waitDuration + ch <- c.maxIdleClosed + ch <- c.maxLifetimeClosed + c.describeNewInGo115(ch) +} + +// Collect implements Collector. 
+func (c *dbStatsCollector) Collect(ch chan<- prometheus.Metric) { + stats := c.db.Stats() + ch <- prometheus.MustNewConstMetric(c.maxOpenConnections, prometheus.GaugeValue, float64(stats.MaxOpenConnections)) + ch <- prometheus.MustNewConstMetric(c.openConnections, prometheus.GaugeValue, float64(stats.OpenConnections)) + ch <- prometheus.MustNewConstMetric(c.inUseConnections, prometheus.GaugeValue, float64(stats.InUse)) + ch <- prometheus.MustNewConstMetric(c.idleConnections, prometheus.GaugeValue, float64(stats.Idle)) + ch <- prometheus.MustNewConstMetric(c.waitCount, prometheus.CounterValue, float64(stats.WaitCount)) + ch <- prometheus.MustNewConstMetric(c.waitDuration, prometheus.CounterValue, stats.WaitDuration.Seconds()) + ch <- prometheus.MustNewConstMetric(c.maxIdleClosed, prometheus.CounterValue, float64(stats.MaxIdleClosed)) + ch <- prometheus.MustNewConstMetric(c.maxLifetimeClosed, prometheus.CounterValue, float64(stats.MaxLifetimeClosed)) + c.collectNewInGo115(ch, stats) +} diff --git a/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go b/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go new file mode 100644 index 00000000..6d152fbf --- /dev/null +++ b/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go @@ -0,0 +1,31 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.15 +// +build go1.15 + +package collectors + +import ( + "database/sql" + + "github.com/prometheus/client_golang/prometheus" +) + +func (c *dbStatsCollector) describeNewInGo115(ch chan<- *prometheus.Desc) { + ch <- c.maxIdleTimeClosed +} + +func (c *dbStatsCollector) collectNewInGo115(ch chan<- prometheus.Metric, stats sql.DBStats) { + ch <- prometheus.MustNewConstMetric(c.maxIdleTimeClosed, prometheus.CounterValue, float64(stats.MaxIdleTimeClosed)) +} diff --git a/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go b/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go new file mode 100644 index 00000000..65235069 --- /dev/null +++ b/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go @@ -0,0 +1,27 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
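A usage sketch for the DB stats collector defined above (imports as in the previous sketch, plus database/sql); the driver name and DSN are placeholders, and the corresponding driver package must be imported for sql.Open to work:

db, err := sql.Open("postgres", "postgres://localhost/app?sslmode=disable")
if err != nil {
	log.Fatal(err)
}
defer db.Close()

reg := prometheus.NewRegistry()
// One collector per *sql.DB; the second argument becomes the db_name label
// on every exported go_sql_* metric.
reg.MustRegister(collectors.NewDBStatsCollector(db, "app"))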
+ +//go:build !go1.15 +// +build !go1.15 + +package collectors + +import ( + "database/sql" + + "github.com/prometheus/client_golang/prometheus" +) + +func (c *dbStatsCollector) describeNewInGo115(ch chan<- *prometheus.Desc) {} + +func (c *dbStatsCollector) collectNewInGo115(ch chan<- prometheus.Metric, stats sql.DBStats) {} diff --git a/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go b/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go new file mode 100644 index 00000000..3aa8d059 --- /dev/null +++ b/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go @@ -0,0 +1,57 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// NewExpvarCollector returns a newly allocated expvar Collector. +// +// An expvar Collector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the expvar Collector is inherently slower +// than native Prometheus metrics. Thus, the expvar Collector is probably great +// for experiments and prototying, but you should seriously consider a more +// direct implementation of Prometheus metrics for monitoring production +// systems. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e. for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. +// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*prometheus.Desc) prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. 
+ return prometheus.NewExpvarCollector(exports) +} diff --git a/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go b/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go new file mode 100644 index 00000000..effc5784 --- /dev/null +++ b/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go @@ -0,0 +1,49 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.17 +// +build !go1.17 + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// NewGoCollector returns a collector that exports metrics about the current Go +// process. This includes memory stats. To collect those, runtime.ReadMemStats +// is called. This requires to β€œstop the world”, which usually only happens for +// garbage collection (GC). Take the following implications into account when +// deciding whether to use the Go collector: +// +// 1. The performance impact of stopping the world is the more relevant the more +// frequently metrics are collected. However, with Go1.9 or later the +// stop-the-world time per metrics collection is very short (~25Β΅s) so that the +// performance impact will only matter in rare cases. However, with older Go +// versions, the stop-the-world duration depends on the heap size and can be +// quite significant (~1.7 ms/GiB as per +// https://go-review.googlesource.com/c/go/+/34937). +// +// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the +// metrics collection happens to coincide with GC, it will only complete after +// GC has finished. Usually, GC is fast enough to not cause problems. However, +// with a very large heap, GC might take multiple seconds, which is enough to +// cause scrape timeouts in common setups. To avoid this problem, the Go +// collector will use the memstats from a previous collection if +// runtime.ReadMemStats takes more than 1s. However, if there are no previously +// collected memstats, or their collection is more than 5m ago, the collection +// will block until runtime.ReadMemStats succeeds. +// +// NOTE: The problem is solved in Go 1.15, see +// https://github.com/golang/go/issues/19812 for the related Go issue. +func NewGoCollector() prometheus.Collector { + return prometheus.NewGoCollector() +} diff --git a/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go new file mode 100644 index 00000000..01790e88 --- /dev/null +++ b/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go @@ -0,0 +1,91 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.17 +// +build go1.17 + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +//nolint:staticcheck // Ignore SA1019 until v2. +type goOptions = prometheus.GoCollectorOptions +type goOption func(o *goOptions) + +type GoCollectionOption uint32 + +const ( + // GoRuntimeMemStatsCollection represents the metrics represented by runtime.MemStats structure such as + // go_memstats_alloc_bytes + // go_memstats_alloc_bytes_total + // go_memstats_sys_bytes + // go_memstats_lookups_total + // go_memstats_mallocs_total + // go_memstats_frees_total + // go_memstats_heap_alloc_bytes + // go_memstats_heap_sys_bytes + // go_memstats_heap_idle_bytes + // go_memstats_heap_inuse_bytes + // go_memstats_heap_released_bytes + // go_memstats_heap_objects + // go_memstats_stack_inuse_bytes + // go_memstats_stack_sys_bytes + // go_memstats_mspan_inuse_bytes + // go_memstats_mspan_sys_bytes + // go_memstats_mcache_inuse_bytes + // go_memstats_mcache_sys_bytes + // go_memstats_buck_hash_sys_bytes + // go_memstats_gc_sys_bytes + // go_memstats_other_sys_bytes + // go_memstats_next_gc_bytes + // so the metrics known from pre client_golang v1.12.0, except skipped go_memstats_gc_cpu_fraction (see + // https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 for explanation. + // + // NOTE that this mode represents runtime.MemStats statistics, but they are + // actually implemented using new runtime/metrics package. + // Deprecated: Use GoRuntimeMetricsCollection instead going forward. + GoRuntimeMemStatsCollection GoCollectionOption = 1 << iota + // GoRuntimeMetricsCollection is the new set of metrics represented by runtime/metrics package and follows + // consistent naming. The exposed metric set depends on Go version, but it is controlled against + // unexpected cardinality. This set has overlapping information with GoRuntimeMemStatsCollection, just with + // new names. GoRuntimeMetricsCollection is what is recommended for using going forward. + GoRuntimeMetricsCollection +) + +// WithGoCollections allows enabling different collections for Go collector on top of base metrics +// like go_goroutines, go_threads, go_gc_duration_seconds, go_memstats_last_gc_time_seconds, go_info. +// +// Check GoRuntimeMemStatsCollection and GoRuntimeMetricsCollection for more details. You can use none, +// one or more collections at once. For example: +// WithGoCollections(GoRuntimeMemStatsCollection | GoRuntimeMetricsCollection) means both GoRuntimeMemStatsCollection +// metrics and GoRuntimeMetricsCollection will be exposed. +// +// The current default is GoRuntimeMemStatsCollection, so the compatibility mode with +// client_golang pre v1.12 (move to runtime/metrics). +func WithGoCollections(flags GoCollectionOption) goOption { + return func(o *goOptions) { + o.EnabledCollections = uint32(flags) + } +} + +// NewGoCollector returns a collector that exports metrics about the current Go +// process using debug.GCStats using runtime/metrics. +func NewGoCollector(opts ...goOption) prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. 
+ promPkgOpts := make([]func(o *prometheus.GoCollectorOptions), len(opts)) + for i, opt := range opts { + promPkgOpts[i] = opt + } + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewGoCollector(promPkgOpts...) +} diff --git a/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go b/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go new file mode 100644 index 00000000..24558f50 --- /dev/null +++ b/src/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go @@ -0,0 +1,56 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// ProcessCollectorOpts defines the behavior of a process metrics collector +// created with NewProcessCollector. +type ProcessCollectorOpts struct { + // PidFn returns the PID of the process the collector collects metrics + // for. It is called upon each collection. By default, the PID of the + // current process is used, as determined on construction time by + // calling os.Getpid(). + PidFn func() (int, error) + // If non-empty, each of the collected metrics is prefixed by the + // provided string and an underscore ("_"). + Namespace string + // If true, any error encountered during collection is reported as an + // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored + // and the collected metrics will be incomplete. (Possibly, no metrics + // will be collected at all.) While that's usually not desired, it is + // appropriate for the common "mix-in" of process metrics, where process + // metrics are nice to have, but failing to collect them should not + // disrupt the collection of the remaining metrics. + ReportErrors bool +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including CPU, memory and file descriptor usage as well as +// the process start time. The detailed behavior is defined by the provided +// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a +// collector for the current process with an empty namespace string and no error +// reporting. +// +// The collector only works on operating systems with a Linux-style proc +// filesystem and on Microsoft Windows. On other operating systems, it will not +// collect any metrics. +func NewProcessCollector(opts ProcessCollectorOpts) prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. 
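Putting the option plumbing above together, this is how a caller opts into the runtime/metrics collection on Go 1.17+ (registry setup as in the earlier collectors sketch):

reg := prometheus.NewRegistry()
reg.MustRegister(collectors.NewGoCollector(
	// Expose both the classic go_memstats_* series and the new
	// runtime/metrics based series; the default is MemStats only.
	collectors.WithGoCollections(
		collectors.GoRuntimeMemStatsCollection | collectors.GoRuntimeMetricsCollection,
	),
))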
+ return prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{ + PidFn: opts.PidFn, + Namespace: opts.Namespace, + ReportErrors: opts.ReportErrors, + }) +} diff --git a/src/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/src/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index 08195b41..4d792aa2 100644 --- a/src/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/src/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -197,14 +197,6 @@ func goRuntimeMemStats() memStatsMetrics { ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_cpu_fraction"), - "The fraction of this program's available CPU time used by the GC since the program started.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, - valType: GaugeValue, }, } } @@ -268,7 +260,6 @@ func (c *baseGoCollector) Collect(ch chan<- Metric) { quantiles[0.0] = stats.PauseQuantiles[0].Seconds() ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9) - ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) } @@ -278,6 +269,7 @@ func memstatNamespace(s string) string { // memStatsMetrics provide description, evaluator, runtime/metrics name, and // value type for memstat metrics. +// TODO(bwplotka): Remove with end Go 1.16 EOL and replace with runtime/metrics.Description type memStatsMetrics []struct { desc *Desc eval func(*runtime.MemStats) float64 diff --git a/src/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/src/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go index 24526131..897a6e90 100644 --- a/src/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go +++ b/src/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go @@ -40,13 +40,28 @@ type goCollector struct { // // Deprecated: Use collectors.NewGoCollector instead. 
func NewGoCollector() Collector { + msMetrics := goRuntimeMemStats() + msMetrics = append(msMetrics, struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType + }{ + // This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }) return &goCollector{ base: newBaseGoCollector(), msLast: &runtime.MemStats{}, msRead: runtime.ReadMemStats, msMaxWait: time.Second, msMaxAge: 5 * time.Minute, - msMetrics: goRuntimeMemStats(), + msMetrics: msMetrics, } } diff --git a/src/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go b/src/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go similarity index 59% rename from src/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go rename to src/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index d43bdcdd..a0fe95eb 100644 --- a/src/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go +++ b/src/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -25,11 +25,71 @@ import ( //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" - "github.com/prometheus/client_golang/prometheus/internal" dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" +) + +const ( + goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects" + goGCHeapAllocsObjects = "/gc/heap/allocs:objects" + goGCHeapFreesObjects = "/gc/heap/frees:objects" + goGCHeapAllocsBytes = "/gc/heap/allocs:bytes" + goGCHeapObjects = "/gc/heap/objects:objects" + goGCHeapGoalBytes = "/gc/heap/goal:bytes" + goMemoryClassesTotalBytes = "/memory/classes/total:bytes" + goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes" + goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes" + goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes" + goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes" + goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes" + goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes" + goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes" + goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes" + goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes" + goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes" + goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes" + goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes" + goMemoryClassesOtherBytes = "/memory/classes/other:bytes" ) +// runtime/metrics names required for runtimeMemStats like logic. 
+var rmForMemStats = []string{goGCHeapTinyAllocsObjects, + goGCHeapAllocsObjects, + goGCHeapFreesObjects, + goGCHeapAllocsBytes, + goGCHeapObjects, + goGCHeapGoalBytes, + goMemoryClassesTotalBytes, + goMemoryClassesHeapObjectsBytes, + goMemoryClassesHeapUnusedBytes, + goMemoryClassesHeapReleasedBytes, + goMemoryClassesHeapFreeBytes, + goMemoryClassesHeapStacksBytes, + goMemoryClassesOSStacksBytes, + goMemoryClassesMetadataMSpanInuseBytes, + goMemoryClassesMetadataMSPanFreeBytes, + goMemoryClassesMetadataMCacheInuseBytes, + goMemoryClassesMetadataMCacheFreeBytes, + goMemoryClassesProfilingBucketsBytes, + goMemoryClassesMetadataOtherBytes, + goMemoryClassesOtherBytes, +} + +func bestEffortLookupRM(lookup []string) []metrics.Description { + ret := make([]metrics.Description, 0, len(lookup)) + for _, rm := range metrics.All() { + for _, m := range lookup { + if m == rm.Name { + ret = append(ret, rm) + } + } + } + return ret +} + type goCollector struct { + opt GoCollectorOptions base baseGoCollector // mu protects updates to all fields ensuring a consistent @@ -51,12 +111,46 @@ type goCollector struct { msMetrics memStatsMetrics } +const ( + // Those are not exposed due to need to move Go collector to another package in v2. + // See issue https://github.com/prometheus/client_golang/issues/1030. + goRuntimeMemStatsCollection uint32 = 1 << iota + goRuntimeMetricsCollection +) + +// GoCollectorOptions should not be used be directly by anything, except `collectors` package. +// Use it via collectors package instead. See issue +// https://github.com/prometheus/client_golang/issues/1030. +// +// Deprecated: Use collectors.WithGoCollections +type GoCollectorOptions struct { + // EnabledCollection sets what type of collections collector should expose on top of base collection. + // By default it's goMemStatsCollection | goRuntimeMetricsCollection. + EnabledCollections uint32 +} + +func (c GoCollectorOptions) isEnabled(flag uint32) bool { + return c.EnabledCollections&flag != 0 +} + +const defaultGoCollections = goRuntimeMemStatsCollection + // NewGoCollector is the obsolete version of collectors.NewGoCollector. // See there for documentation. // // Deprecated: Use collectors.NewGoCollector instead. -func NewGoCollector() Collector { - descriptions := metrics.All() +func NewGoCollector(opts ...func(o *GoCollectorOptions)) Collector { + opt := GoCollectorOptions{EnabledCollections: defaultGoCollections} + for _, o := range opts { + o(&opt) + } + + var descriptions []metrics.Description + if opt.isEnabled(goRuntimeMetricsCollection) { + descriptions = metrics.All() + } else if opt.isEnabled(goRuntimeMemStatsCollection) { + descriptions = bestEffortLookupRM(rmForMemStats) + } // Collect all histogram samples so that we can get their buckets. // The API guarantees that the buckets are always fixed for the lifetime @@ -67,7 +161,11 @@ func NewGoCollector() Collector { histograms = append(histograms, metrics.Sample{Name: d.Name}) } } - metrics.Read(histograms) + + if len(histograms) > 0 { + metrics.Read(histograms) + } + bucketsMap := make(map[string][]float64) for i := range histograms { bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets @@ -83,7 +181,7 @@ func NewGoCollector() Collector { if !ok { // Just ignore this metric; we can't do anything with it here. // If a user decides to use the latest version of Go, we don't want - // to fail here. This condition is tested elsewhere. + // to fail here. This condition is tested in TestExpectedRuntimeMetrics. 
continue } @@ -123,12 +221,18 @@ func NewGoCollector() Collector { } metricSet = append(metricSet, m) } + + var msMetrics memStatsMetrics + if opt.isEnabled(goRuntimeMemStatsCollection) { + msMetrics = goRuntimeMemStats() + } return &goCollector{ + opt: opt, base: newBaseGoCollector(), rmSampleBuf: sampleBuf, rmSampleMap: sampleMap, rmMetrics: metricSet, - msMetrics: goRuntimeMemStats(), + msMetrics: msMetrics, } } @@ -163,40 +267,47 @@ func (c *goCollector) Collect(ch chan<- Metric) { c.mu.Lock() defer c.mu.Unlock() - // Populate runtime/metrics sample buffer. - metrics.Read(c.rmSampleBuf) - - // Update all our metrics from rmSampleBuf. - for i, sample := range c.rmSampleBuf { - // N.B. switch on concrete type because it's significantly more efficient - // than checking for the Counter and Gauge interface implementations. In - // this case, we control all the types here. - switch m := c.rmMetrics[i].(type) { - case *counter: - // Guard against decreases. This should never happen, but a failure - // to do so will result in a panic, which is a harsh consequence for - // a metrics collection bug. - v0, v1 := m.get(), unwrapScalarRMValue(sample.Value) - if v1 > v0 { - m.Add(unwrapScalarRMValue(sample.Value) - m.get()) + if len(c.rmSampleBuf) > 0 { + // Populate runtime/metrics sample buffer. + metrics.Read(c.rmSampleBuf) + } + + if c.opt.isEnabled(goRuntimeMetricsCollection) { + // Collect all our metrics from rmSampleBuf. + for i, sample := range c.rmSampleBuf { + // N.B. switch on concrete type because it's significantly more efficient + // than checking for the Counter and Gauge interface implementations. In + // this case, we control all the types here. + switch m := c.rmMetrics[i].(type) { + case *counter: + // Guard against decreases. This should never happen, but a failure + // to do so will result in a panic, which is a harsh consequence for + // a metrics collection bug. + v0, v1 := m.get(), unwrapScalarRMValue(sample.Value) + if v1 > v0 { + m.Add(unwrapScalarRMValue(sample.Value) - m.get()) + } + m.Collect(ch) + case *gauge: + m.Set(unwrapScalarRMValue(sample.Value)) + m.Collect(ch) + case *batchHistogram: + m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name)) + m.Collect(ch) + default: + panic("unexpected metric type") } - m.Collect(ch) - case *gauge: - m.Set(unwrapScalarRMValue(sample.Value)) - m.Collect(ch) - case *batchHistogram: - m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name)) - m.Collect(ch) - default: - panic("unexpected metric type") } } + // ms is a dummy MemStats that we populate ourselves so that we can - // populate the old metrics from it. - var ms runtime.MemStats - memStatsFromRM(&ms, c.rmSampleMap) - for _, i := range c.msMetrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms)) + // populate the old metrics from it if goMemStatsCollection is enabled. + if c.opt.isEnabled(goRuntimeMemStatsCollection) { + var ms runtime.MemStats + memStatsFromRM(&ms, c.rmSampleMap) + for _, i := range c.msMetrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms)) + } } } @@ -261,35 +372,30 @@ func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) { // while having Mallocs - Frees still represent a live object count. // Unfortunately, MemStats doesn't actually export a large allocation count, // so it's impossible to pull this number out directly. 
- tinyAllocs := lookupOrZero("/gc/heap/tiny/allocs:objects") - ms.Mallocs = lookupOrZero("/gc/heap/allocs:objects") + tinyAllocs - ms.Frees = lookupOrZero("/gc/heap/frees:objects") + tinyAllocs + tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects) + ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs + ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs - ms.TotalAlloc = lookupOrZero("/gc/heap/allocs:bytes") - ms.Sys = lookupOrZero("/memory/classes/total:bytes") + ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes) + ms.Sys = lookupOrZero(goMemoryClassesTotalBytes) ms.Lookups = 0 // Already always zero. - ms.HeapAlloc = lookupOrZero("/memory/classes/heap/objects:bytes") + ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes) ms.Alloc = ms.HeapAlloc - ms.HeapInuse = ms.HeapAlloc + lookupOrZero("/memory/classes/heap/unused:bytes") - ms.HeapReleased = lookupOrZero("/memory/classes/heap/released:bytes") - ms.HeapIdle = ms.HeapReleased + lookupOrZero("/memory/classes/heap/free:bytes") + ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes) + ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes) + ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes) ms.HeapSys = ms.HeapInuse + ms.HeapIdle - ms.HeapObjects = lookupOrZero("/gc/heap/objects:objects") - ms.StackInuse = lookupOrZero("/memory/classes/heap/stacks:bytes") - ms.StackSys = ms.StackInuse + lookupOrZero("/memory/classes/os-stacks:bytes") - ms.MSpanInuse = lookupOrZero("/memory/classes/metadata/mspan/inuse:bytes") - ms.MSpanSys = ms.MSpanInuse + lookupOrZero("/memory/classes/metadata/mspan/free:bytes") - ms.MCacheInuse = lookupOrZero("/memory/classes/metadata/mcache/inuse:bytes") - ms.MCacheSys = ms.MCacheInuse + lookupOrZero("/memory/classes/metadata/mcache/free:bytes") - ms.BuckHashSys = lookupOrZero("/memory/classes/profiling/buckets:bytes") - ms.GCSys = lookupOrZero("/memory/classes/metadata/other:bytes") - ms.OtherSys = lookupOrZero("/memory/classes/other:bytes") - ms.NextGC = lookupOrZero("/gc/heap/goal:bytes") - - // N.B. LastGC is omitted because runtime.GCStats already has this. - // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 - // for more details. - ms.LastGC = 0 + ms.HeapObjects = lookupOrZero(goGCHeapObjects) + ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes) + ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes) + ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes) + ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes) + ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes) + ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes) + ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes) + ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes) + ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes) + ms.NextGC = lookupOrZero(goGCHeapGoalBytes) // N.B. GCCPUFraction is intentionally omitted. This metric is not useful, // and often misleading due to the fact that it's an average over the lifetime @@ -324,6 +430,11 @@ type batchHistogram struct { // buckets must always be from the runtime/metrics package, following // the same conventions. func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram { + // We need to remove -Inf values. runtime/metrics keeps them around. + // But -Inf bucket should not be allowed for prometheus histograms. 
+ if buckets[0] == math.Inf(-1) { + buckets = buckets[1:] + } h := &batchHistogram{ desc: desc, buckets: buckets, @@ -382,8 +493,10 @@ func (h *batchHistogram) Write(out *dto.Metric) error { for i, count := range h.counts { totalCount += count if !h.hasSum { - // N.B. This computed sum is an underestimate. - sum += h.buckets[i] * float64(count) + if count != 0 { + // N.B. This computed sum is an underestimate. + sum += h.buckets[i] * float64(count) + } } // Skip the +Inf bucket, but only for the bucket list. diff --git a/src/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/src/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index fe0a5218..6cbe063a 100644 --- a/src/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/src/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -62,7 +62,7 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) // other data. name = strings.ReplaceAll(name, "-", "_") name = name + "_" + unit - if d.Cumulative { + if d.Cumulative && d.Kind != metrics.KindFloat64Histogram { name = name + "_total" } @@ -84,12 +84,12 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 { switch unit { case "bytes": - // Rebucket as powers of 2. - return rebucketExp(buckets, 2) + // Re-bucket as powers of 2. + return reBucketExp(buckets, 2) case "seconds": - // Rebucket as powers of 10 and then merge all buckets greater + // Re-bucket as powers of 10 and then merge all buckets greater // than 1 second into the +Inf bucket. - b := rebucketExp(buckets, 10) + b := reBucketExp(buckets, 10) for i := range b { if b[i] <= 1 { continue @@ -103,11 +103,11 @@ func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 { return buckets } -// rebucketExp takes a list of bucket boundaries (lower bound inclusive) and +// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and // downsamples the buckets to those a multiple of base apart. The end result // is a roughly exponential (in many cases, perfectly exponential) bucketing // scheme. -func rebucketExp(buckets []float64, base float64) []float64 { +func reBucketExp(buckets []float64, base float64) []float64 { bucket := buckets[0] var newBuckets []float64 // We may see a -Inf here, in which case, add it and skip it diff --git a/src/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/src/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index b6b00cf2..d61301b1 100644 --- a/src/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/src/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -1,7 +1,3 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - package inspector // This file defines func typeOf(ast.Node) uint64. 
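The renamed reBucketExp helper above downsamples runtime/metrics histogram boundaries into a roughly exponential set; a self-contained sketch of the idea (not the vendored implementation, which additionally handles -Inf and negative boundaries):

package main

import "fmt"

// downsample keeps each boundary that is at least base times the previously
// kept one, which is the exponential re-bucketing idea described above.
func downsample(buckets []float64, base float64) []float64 {
	if len(buckets) == 0 {
		return nil
	}
	out := []float64{buckets[0]}
	last := buckets[0]
	for _, b := range buckets[1:] {
		if b >= last*base {
			out = append(out, b)
			last = b
		}
	}
	return out
}

func main() {
	in := []float64{1, 2, 3, 4, 5, 6, 8, 12, 16, 24, 32}
	fmt.Println(downsample(in, 2)) // [1 2 4 8 16 32]
}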
diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt index 4c8cf090..c60fece9 100644 --- a/src/vendor/modules.txt +++ b/src/vendor/modules.txt @@ -1,5 +1,5 @@ -# code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee -## explicit +# code.cloudfoundry.org/go-diodes v0.0.0-20220601181242-ac2da19efd60 +## explicit; go 1.18 code.cloudfoundry.org/go-diodes # code.cloudfoundry.org/go-envstruct v1.5.0 ## explicit; go 1.12 @@ -8,18 +8,18 @@ code.cloudfoundry.org/go-envstruct ## explicit; go 1.12 code.cloudfoundry.org/go-loggregator code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2 -# code.cloudfoundry.org/go-metric-registry v0.0.0-20200413202920-40d97c8804ec -## explicit; go 1.12 +# code.cloudfoundry.org/go-metric-registry v0.0.0-20220601181753-a64f39ee2597 +## explicit; go 1.18 code.cloudfoundry.org/go-metric-registry code.cloudfoundry.org/go-metric-registry/testhelpers -# code.cloudfoundry.org/rfc5424 v0.0.0-20180905210152-236a6d29298a +# code.cloudfoundry.org/rfc5424 v0.0.0-20201103192249-000122071b78 ## explicit code.cloudfoundry.org/rfc5424 -# code.cloudfoundry.org/tlsconfig v0.0.0-20200131000646-bbe0f8da39b3 -## explicit; go 1.13 +# code.cloudfoundry.org/tlsconfig v0.0.0-20211123175040-23cc9f05b6b3 +## explicit; go 1.16 code.cloudfoundry.org/tlsconfig code.cloudfoundry.org/tlsconfig/certtest -# github.com/armon/go-metrics v0.3.3 +# github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 ## explicit; go 1.12 github.com/armon/go-metrics # github.com/beorn7/perks v1.0.1 @@ -37,8 +37,6 @@ github.com/fsnotify/fsnotify # github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 ## explicit; go 1.13 github.com/go-task/slim-sprig -# github.com/gogo/protobuf v1.3.2 -## explicit; go 1.15 # github.com/golang/protobuf v1.5.2 ## explicit; go 1.9 github.com/golang/protobuf/jsonpb @@ -50,26 +48,18 @@ github.com/golang/protobuf/ptypes/timestamp # github.com/hashicorp/go-hclog v1.2.1 ## explicit; go 1.13 github.com/hashicorp/go-hclog -# github.com/hashicorp/go-immutable-radix v1.2.0 +# github.com/hashicorp/go-immutable-radix v1.0.0 ## explicit github.com/hashicorp/go-immutable-radix -# github.com/hashicorp/go-msgpack v1.1.5 -## explicit; go 1.13 +# github.com/hashicorp/go-msgpack v0.5.5 +## explicit github.com/hashicorp/go-msgpack/codec -# github.com/hashicorp/go-uuid v1.0.2 +# github.com/hashicorp/golang-lru v0.5.1 ## explicit -# github.com/hashicorp/golang-lru v0.5.4 -## explicit; go 1.12 github.com/hashicorp/golang-lru/simplelru # github.com/hashicorp/raft v1.3.9 ## explicit; go 1.12 github.com/hashicorp/raft -# github.com/klauspost/compress v1.15.1 -## explicit; go 1.15 -# github.com/kr/pretty v0.2.0 -## explicit; go 1.12 -# github.com/kr/text v0.2.0 -## explicit # github.com/mattn/go-colorable v0.1.12 ## explicit; go 1.13 github.com/mattn/go-colorable @@ -79,9 +69,9 @@ github.com/mattn/go-isatty # github.com/matttproud/golang_protobuf_extensions v1.0.1 ## explicit github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/nats-io/nats-server/v2 v2.7.4 +# github.com/nats-io/nats-server/v2 v2.8.4 ## explicit; go 1.17 -# github.com/nats-io/nats.go v1.13.1-0.20220308171302-2f2f6968e98d +# github.com/nats-io/nats.go v1.16.0 ## explicit; go 1.16 github.com/nats-io/nats.go github.com/nats-io/nats.go/encoders/builtin @@ -142,9 +132,10 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types -# github.com/prometheus/client_golang 
v1.12.1 +# github.com/prometheus/client_golang v1.12.2 ## explicit; go 1.13 github.com/prometheus/client_golang/prometheus +github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp # github.com/prometheus/client_model v0.2.0 @@ -163,7 +154,7 @@ github.com/prometheus/procfs/internal/util # github.com/square/certstrap v1.2.0 ## explicit github.com/square/certstrap/pkix -# golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064 +# golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd ## explicit; go 1.17 golang.org/x/crypto/ed25519 # golang.org/x/net v0.0.0-20220225172249-27dd8689420f @@ -205,9 +196,7 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 -## explicit -# golang.org/x/tools v0.0.0-20210106214847-113979e3529a +# golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e ## explicit; go 1.12 golang.org/x/tools/go/ast/inspector # google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 From a24fb80a8cc71ce03f0e3b1fdef863fc1a1dbb2a Mon Sep 17 00:00:00 2001 From: Carson Long Date: Tue, 14 Jun 2022 22:57:47 -0400 Subject: [PATCH 2/2] Upgrade to ginkgo v2 * Use `github.com/onsi/ginkgo/v2` package everywhere. * Replace deprecated `GinkgoParallelNode` with `GinkgoParallelProcess`. * Update `Go / test` workflow to use ginkgo v2 CLI and flags. https://onsi.github.io/ginkgo/#recommended-continuous-integration-configuration. --- .github/workflows/go.yml | 2 +- .../app/agent_suite_test.go | 2 +- src/cmd/leadership-election/app/agent_test.go | 4 +- src/cmd/metric-scraper/app/app_suite_test.go | 2 +- .../metric-scraper/app/metric_scraper_test.go | 2 +- src/go.mod | 6 +- src/go.sum | 9 +- .../testhelper/testhelper_suite_test.go | 2 +- src/pkg/scraper/config_provider_test.go | 2 +- src/pkg/scraper/dns_ip_provider_test.go | 2 +- src/pkg/scraper/scraper_error_test.go | 2 +- src/pkg/scraper/scraper_suite_test.go | 2 +- src/pkg/scraper/scraper_test.go | 2 +- src/tools.go | 2 +- src/vendor/github.com/google/pprof/AUTHORS | 7 + .../github.com/google/pprof/CONTRIBUTORS | 16 + src/vendor/github.com/google/pprof/LICENSE | 202 +++ .../github.com/google/pprof/profile/encode.go | 567 ++++++++ .../github.com/google/pprof/profile/filter.go | 270 ++++ .../github.com/google/pprof/profile/index.go | 64 + .../pprof/profile/legacy_java_profile.go | 315 +++++ .../google/pprof/profile/legacy_profile.go | 1225 +++++++++++++++++ .../github.com/google/pprof/profile/merge.go | 481 +++++++ .../google/pprof/profile/profile.go | 805 +++++++++++ .../github.com/google/pprof/profile/proto.go | 370 +++++ .../github.com/google/pprof/profile/prune.go | 178 +++ src/vendor/github.com/onsi/ginkgo/.travis.yml | 24 - src/vendor/github.com/onsi/ginkgo/README.md | 169 --- .../github.com/onsi/ginkgo/ginkgo_dsl.go | 681 --------- .../onsi/ginkgo/internal/global/init.go | 22 - .../ginkgo/internal/specrunner/random_id.go | 15 - .../ginkgo/internal/specrunner/spec_runner.go | 411 ------ .../onsi/ginkgo/internal/suite/suite.go | 227 --- .../onsi/ginkgo/{ => v2}/.gitignore | 4 +- .../onsi/ginkgo/{ => v2}/CHANGELOG.md | 53 +- .../onsi/ginkgo/{ => v2}/CONTRIBUTING.md | 28 +- src/vendor/github.com/onsi/ginkgo/v2/LICENSE | 20 + .../github.com/onsi/ginkgo/v2/README.md | 119 ++ .../onsi/ginkgo/{ => v2}/RELEASING.md | 2 +- .../onsi/ginkgo/v2/config/deprecated.go | 69 + .../github.com/onsi/ginkgo/v2/core_dsl.go | 
687 +++++++++ .../onsi/ginkgo/v2/decorator_dsl.go | 82 ++ .../onsi/ginkgo/v2/deprecated_dsl.go | 135 ++ .../ginkgo/v2/formatter/colorable_others.go | 41 + .../ginkgo/v2/formatter/colorable_windows.go | 809 +++++++++++ .../onsi/ginkgo/v2/formatter/formatter.go | 195 +++ .../ginkgo/v2/ginkgo/build/build_command.go | 61 + .../onsi/ginkgo/v2/ginkgo/command/abort.go | 61 + .../onsi/ginkgo/v2/ginkgo/command/command.go | 50 + .../onsi/ginkgo/v2/ginkgo/command/program.go | 182 +++ .../ginkgo/generators/boostrap_templates.go | 48 + .../v2/ginkgo/generators/bootstrap_command.go | 113 ++ .../v2/ginkgo/generators/generate_command.go | 239 ++++ .../ginkgo/generators/generate_templates.go | 41 + .../v2/ginkgo/generators/generators_common.go | 63 + .../onsi/ginkgo/v2/ginkgo/internal/compile.go | 152 ++ .../ginkgo/internal/profiles_and_reports.go | 237 ++++ .../onsi/ginkgo/v2/ginkgo/internal/run.go | 348 +++++ .../ginkgo/v2/ginkgo/internal/test_suite.go | 283 ++++ .../onsi/ginkgo/v2/ginkgo/internal/utils.go | 86 ++ .../ginkgo/v2/ginkgo/labels/labels_command.go | 123 ++ .../github.com/onsi/ginkgo/v2/ginkgo/main.go | 58 + .../onsi/ginkgo/v2/ginkgo/outline/ginkgo.go | 218 +++ .../onsi/ginkgo/v2/ginkgo/outline/import.go | 65 + .../onsi/ginkgo/v2/ginkgo/outline/outline.go | 103 ++ .../v2/ginkgo/outline/outline_command.go | 98 ++ .../onsi/ginkgo/v2/ginkgo/run/run_command.go | 230 ++++ .../v2/ginkgo/unfocus/unfocus_command.go | 186 +++ .../onsi/ginkgo/v2/ginkgo/watch/delta.go | 22 + .../ginkgo/v2/ginkgo/watch/delta_tracker.go | 75 + .../ginkgo/v2/ginkgo/watch/dependencies.go | 92 ++ .../ginkgo/v2/ginkgo/watch/package_hash.go | 108 ++ .../ginkgo/v2/ginkgo/watch/package_hashes.go | 85 ++ .../onsi/ginkgo/v2/ginkgo/watch/suite.go | 87 ++ .../ginkgo/v2/ginkgo/watch/watch_command.go | 190 +++ .../github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go | 45 + .../onsi/ginkgo/v2/internal/counter.go | 9 + .../onsi/ginkgo/v2/internal/failer.go | 99 ++ .../onsi/ginkgo/v2/internal/focus.go | 125 ++ .../onsi/ginkgo/v2/internal/global/init.go | 17 + .../onsi/ginkgo/v2/internal/group.go | 544 ++++++++ .../interrupt_handler/interrupt_handler.go | 212 +++ .../sigquit_swallower_unix.go | 15 + .../sigquit_swallower_windows.go | 8 + .../onsi/ginkgo/v2/internal/node.go | 660 +++++++++ .../onsi/ginkgo/v2/internal/ordering.go | 121 ++ .../ginkgo/v2/internal/output_interceptor.go | 250 ++++ .../v2/internal/output_interceptor_unix.go | 62 + .../v2/internal/output_interceptor_win.go | 7 + .../parallel_support/client_server.go | 69 + .../internal/parallel_support/http_client.go | 152 ++ .../internal/parallel_support/http_server.go | 214 +++ .../internal/parallel_support/rpc_client.go | 119 ++ .../internal/parallel_support/rpc_server.go | 75 + .../parallel_support/server_handler.go | 202 +++ .../onsi/ginkgo/v2/internal/report_entry.go | 40 + .../onsi/ginkgo/v2/internal/spec.go | 71 + .../onsi/ginkgo/v2/internal/suite.go | 629 +++++++++ .../internal/testingtproxy/testing_t_proxy.go | 69 +- .../onsi/ginkgo/v2/internal/tree.go | 77 ++ .../onsi/ginkgo/v2/internal/writer.go | 103 ++ .../ginkgo/v2/reporters/default_reporter.go | 410 ++++++ .../v2/reporters/deprecated_reporter.go | 149 ++ .../onsi/ginkgo/v2/reporters/json_report.go | 60 + .../onsi/ginkgo/v2/reporters/junit_report.go | 307 +++++ .../onsi/ginkgo/v2/reporters/reporter.go | 19 + .../ginkgo/v2/reporters/teamcity_report.go | 97 ++ .../onsi/ginkgo/v2/reporting_dsl.go | 153 ++ .../github.com/onsi/ginkgo/v2/table_dsl.go | 265 ++++ .../onsi/ginkgo/v2/types/code_location.go | 92 ++ 
.../github.com/onsi/ginkgo/v2/types/config.go | 723 ++++++++++ .../onsi/ginkgo/v2/types/deprecated_types.go | 141 ++ .../ginkgo/v2/types/deprecation_support.go | 170 +++ .../onsi/ginkgo/v2/types/enum_support.go | 43 + .../github.com/onsi/ginkgo/v2/types/errors.go | 534 +++++++ .../onsi/ginkgo/v2/types/file_filter.go | 106 ++ .../github.com/onsi/ginkgo/v2/types/flags.go | 489 +++++++ .../onsi/ginkgo/v2/types/label_filter.go | 347 +++++ .../onsi/ginkgo/v2/types/report_entry.go | 185 +++ .../github.com/onsi/ginkgo/v2/types/types.go | 544 ++++++++ .../onsi/ginkgo/v2/types/version.go | 3 + .../x/tools/go/ast/inspector/typeof.go | 13 +- .../x/tools/internal/typeparams/common.go | 180 +++ .../internal/typeparams/enabled_go117.go | 12 + .../internal/typeparams/enabled_go118.go | 15 + .../x/tools/internal/typeparams/normalize.go | 216 +++ .../x/tools/internal/typeparams/termlist.go | 172 +++ .../internal/typeparams/typeparams_go117.go | 197 +++ .../internal/typeparams/typeparams_go118.go | 151 ++ .../x/tools/internal/typeparams/typeterm.go | 170 +++ src/vendor/modules.txt | 35 +- 131 files changed, 20400 insertions(+), 1630 deletions(-) create mode 100644 src/vendor/github.com/google/pprof/AUTHORS create mode 100644 src/vendor/github.com/google/pprof/CONTRIBUTORS create mode 100644 src/vendor/github.com/google/pprof/LICENSE create mode 100644 src/vendor/github.com/google/pprof/profile/encode.go create mode 100644 src/vendor/github.com/google/pprof/profile/filter.go create mode 100644 src/vendor/github.com/google/pprof/profile/index.go create mode 100644 src/vendor/github.com/google/pprof/profile/legacy_java_profile.go create mode 100644 src/vendor/github.com/google/pprof/profile/legacy_profile.go create mode 100644 src/vendor/github.com/google/pprof/profile/merge.go create mode 100644 src/vendor/github.com/google/pprof/profile/profile.go create mode 100644 src/vendor/github.com/google/pprof/profile/proto.go create mode 100644 src/vendor/github.com/google/pprof/profile/prune.go delete mode 100644 src/vendor/github.com/onsi/ginkgo/.travis.yml delete mode 100644 src/vendor/github.com/onsi/ginkgo/README.md delete mode 100644 src/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go delete mode 100644 src/vendor/github.com/onsi/ginkgo/internal/global/init.go delete mode 100644 src/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go delete mode 100644 src/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go delete mode 100644 src/vendor/github.com/onsi/ginkgo/internal/suite/suite.go rename src/vendor/github.com/onsi/ginkgo/{ => v2}/.gitignore (79%) rename src/vendor/github.com/onsi/ginkgo/{ => v2}/CHANGELOG.md (89%) rename src/vendor/github.com/onsi/ginkgo/{ => v2}/CONTRIBUTING.md (50%) create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/LICENSE create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/README.md rename src/vendor/github.com/onsi/ginkgo/{ => v2}/RELEASING.md (93%) create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/core_dsl.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go create mode 100644 
src/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/counter.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/failer.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/focus.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/group.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/node.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go create mode 100644 
src/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/spec.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/suite.go rename src/vendor/github.com/onsi/ginkgo/{ => v2}/internal/testingtproxy/testing_t_proxy.go (56%) create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/tree.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/internal/writer.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/table_dsl.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/types/code_location.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/types/config.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/types/errors.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/types/flags.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/types/types.go create mode 100644 src/vendor/github.com/onsi/ginkgo/v2/types/version.go create mode 100644 src/vendor/golang.org/x/tools/internal/typeparams/common.go create mode 100644 src/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go create mode 100644 src/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go create mode 100644 src/vendor/golang.org/x/tools/internal/typeparams/normalize.go create mode 100644 src/vendor/golang.org/x/tools/internal/typeparams/termlist.go create mode 100644 src/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go create mode 100644 src/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go create mode 100644 src/vendor/golang.org/x/tools/internal/typeparams/typeterm.go diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 40ef1e29..5523dbe9 100644 
--- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -18,7 +18,7 @@ jobs: - uses: actions/setup-go@v3 with: go-version: 1.18 - - run: go run github.com/onsi/ginkgo/ginkgo -r -p -race -randomizeAllSpecs -randomizeSuites + - run: go run github.com/onsi/ginkgo/v2/ginkgo -r --procs=3 --compilers=3 --randomize-all --randomize-suites --keep-going --race --trace working-directory: src vet: diff --git a/src/cmd/leadership-election/app/agent_suite_test.go b/src/cmd/leadership-election/app/agent_suite_test.go index 1a7dc4a7..bcdeed59 100644 --- a/src/cmd/leadership-election/app/agent_suite_test.go +++ b/src/cmd/leadership-election/app/agent_suite_test.go @@ -3,7 +3,7 @@ package app_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/src/cmd/leadership-election/app/agent_test.go b/src/cmd/leadership-election/app/agent_test.go index 8e6fa279..e05a3fcb 100644 --- a/src/cmd/leadership-election/app/agent_test.go +++ b/src/cmd/leadership-election/app/agent_test.go @@ -10,7 +10,7 @@ import ( "code.cloudfoundry.org/tlsconfig/certtest" "github.com/onsi/gomega/types" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "code.cloudfoundry.org/system-metrics-scraper/cmd/leadership-election/app" @@ -28,7 +28,7 @@ var _ = Describe("Agent", func() { ) BeforeEach(func() { - run += GinkgoParallelNode() * 15 + run += GinkgoParallelProcess() * 15 agents = make(map[string]*app.Agent) diff --git a/src/cmd/metric-scraper/app/app_suite_test.go b/src/cmd/metric-scraper/app/app_suite_test.go index 3c7b7c37..55a0c67c 100644 --- a/src/cmd/metric-scraper/app/app_suite_test.go +++ b/src/cmd/metric-scraper/app/app_suite_test.go @@ -3,7 +3,7 @@ package app_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/src/cmd/metric-scraper/app/metric_scraper_test.go b/src/cmd/metric-scraper/app/metric_scraper_test.go index 67c740a8..5bf5d9ba 100644 --- a/src/cmd/metric-scraper/app/metric_scraper_test.go +++ b/src/cmd/metric-scraper/app/metric_scraper_test.go @@ -17,7 +17,7 @@ import ( "code.cloudfoundry.org/tlsconfig" "google.golang.org/grpc/credentials" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/onsi/gomega/gbytes" "google.golang.org/grpc" diff --git a/src/go.mod b/src/go.mod index 99f1e1de..ea75e252 100644 --- a/src/go.mod +++ b/src/go.mod @@ -10,7 +10,7 @@ require ( github.com/hashicorp/go-hclog v1.2.1 github.com/hashicorp/raft v1.3.9 github.com/nats-io/nats.go v1.16.0 - github.com/onsi/ginkgo v1.16.5 + github.com/onsi/ginkgo/v2 v2.1.4 github.com/onsi/gomega v1.19.0 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.34.0 @@ -28,6 +28,7 @@ require ( github.com/fsnotify/fsnotify v1.4.9 // indirect github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect github.com/golang/protobuf v1.5.2 // indirect + github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect github.com/hashicorp/go-immutable-radix v1.0.0 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/golang-lru v0.5.1 // indirect @@ -38,6 +39,7 @@ require ( github.com/nats-io/nkeys v0.3.0 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/nxadm/tail v1.4.8 // indirect + github.com/onsi/ginkgo v1.16.5 // indirect github.com/prometheus/client_golang v1.12.2 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/square/certstrap v1.2.0 // indirect @@ -45,7 +47,7 @@ require ( golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 // indirect golang.org/x/text v0.3.7 // indirect - golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e // indirect + golang.org/x/tools v0.1.10 // indirect google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 // indirect google.golang.org/protobuf v1.27.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect diff --git a/src/go.sum b/src/go.sum index 218c767f..cb36c2fd 100644 --- a/src/go.sum +++ b/src/go.sum @@ -165,6 +165,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -189,6 +191,7 @@ github.com/hashicorp/raft v1.3.9/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7H github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -244,7 +247,8 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= +github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= @@ -517,8 +521,9 @@ golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/src/internal/testhelper/testhelper_suite_test.go b/src/internal/testhelper/testhelper_suite_test.go index 8424a3eb..e8fd8925 100644 --- a/src/internal/testhelper/testhelper_suite_test.go +++ b/src/internal/testhelper/testhelper_suite_test.go @@ -1,7 +1,7 @@ package testhelper_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "testing" diff --git a/src/pkg/scraper/config_provider_test.go b/src/pkg/scraper/config_provider_test.go index 8734346b..987e97b7 100644 --- a/src/pkg/scraper/config_provider_test.go +++ b/src/pkg/scraper/config_provider_test.go @@ -9,7 +9,7 @@ import ( "code.cloudfoundry.org/system-metrics-scraper/pkg/scraper" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/src/pkg/scraper/dns_ip_provider_test.go b/src/pkg/scraper/dns_ip_provider_test.go index 4ab7368b..052ac7c1 100644 --- a/src/pkg/scraper/dns_ip_provider_test.go +++ b/src/pkg/scraper/dns_ip_provider_test.go @@ -4,7 +4,7 @@ import ( "io/ioutil" "code.cloudfoundry.org/system-metrics-scraper/pkg/scraper" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) diff --git a/src/pkg/scraper/scraper_error_test.go b/src/pkg/scraper/scraper_error_test.go index f31001c9..91a7c67a 100644 --- a/src/pkg/scraper/scraper_error_test.go +++ b/src/pkg/scraper/scraper_error_test.go @@ -4,7 +4,7 @@ import ( "errors" "code.cloudfoundry.org/system-metrics-scraper/pkg/scraper" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/src/pkg/scraper/scraper_suite_test.go b/src/pkg/scraper/scraper_suite_test.go index 741b025c..aa2f8a5f 100644 --- a/src/pkg/scraper/scraper_suite_test.go +++ b/src/pkg/scraper/scraper_suite_test.go @@ -3,7 +3,7 @@ package scraper_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/src/pkg/scraper/scraper_test.go b/src/pkg/scraper/scraper_test.go index fe059676..b2689661 100644 --- a/src/pkg/scraper/scraper_test.go +++ b/src/pkg/scraper/scraper_test.go @@ -10,7 +10,7 @@ import ( metricshelper "code.cloudfoundry.org/go-metric-registry/testhelpers" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "code.cloudfoundry.org/go-loggregator" diff --git a/src/tools.go b/src/tools.go index 160f26ac..e4195cf3 100644 --- a/src/tools.go +++ b/src/tools.go @@ -4,5 +4,5 @@ package main import ( - _ "github.com/onsi/ginkgo/ginkgo" + _ "github.com/onsi/ginkgo/v2/ginkgo" ) diff --git a/src/vendor/github.com/google/pprof/AUTHORS b/src/vendor/github.com/google/pprof/AUTHORS new file mode 100644 index 00000000..fd736cb1 --- /dev/null +++ b/src/vendor/github.com/google/pprof/AUTHORS @@ -0,0 +1,7 @@ +# This is the official list of pprof authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. +Google Inc. \ No newline at end of file diff --git a/src/vendor/github.com/google/pprof/CONTRIBUTORS b/src/vendor/github.com/google/pprof/CONTRIBUTORS new file mode 100644 index 00000000..8c8c37d2 --- /dev/null +++ b/src/vendor/github.com/google/pprof/CONTRIBUTORS @@ -0,0 +1,16 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name +Raul Silvera +Tipp Moseley +Hyoun Kyu Cho +Martin Spier +Taco de Wolff +Andrew Hunter diff --git a/src/vendor/github.com/google/pprof/LICENSE b/src/vendor/github.com/google/pprof/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/src/vendor/github.com/google/pprof/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/src/vendor/github.com/google/pprof/profile/encode.go b/src/vendor/github.com/google/pprof/profile/encode.go new file mode 100644 index 00000000..ab7f03ae --- /dev/null +++ b/src/vendor/github.com/google/pprof/profile/encode.go @@ -0,0 +1,567 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "errors" + "sort" +) + +func (p *Profile) decoder() []decoder { + return profileDecoder +} + +// preEncode populates the unexported fields to be used by encode +// (with suffix X) from the corresponding exported fields. The +// exported fields are cleared up to facilitate testing. +func (p *Profile) preEncode() { + strings := make(map[string]int) + addString(strings, "") + + for _, st := range p.SampleType { + st.typeX = addString(strings, st.Type) + st.unitX = addString(strings, st.Unit) + } + + for _, s := range p.Sample { + s.labelX = nil + var keys []string + for k := range s.Label { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := s.Label[k] + for _, v := range vs { + s.labelX = append(s.labelX, + label{ + keyX: addString(strings, k), + strX: addString(strings, v), + }, + ) + } + } + var numKeys []string + for k := range s.NumLabel { + numKeys = append(numKeys, k) + } + sort.Strings(numKeys) + for _, k := range numKeys { + keyX := addString(strings, k) + vs := s.NumLabel[k] + units := s.NumUnit[k] + for i, v := range vs { + var unitX int64 + if len(units) != 0 { + unitX = addString(strings, units[i]) + } + s.labelX = append(s.labelX, + label{ + keyX: keyX, + numX: v, + unitX: unitX, + }, + ) + } + } + s.locationIDX = make([]uint64, len(s.Location)) + for i, loc := range s.Location { + s.locationIDX[i] = loc.ID + } + } + + for _, m := range p.Mapping { + m.fileX = addString(strings, m.File) + m.buildIDX = addString(strings, m.BuildID) + } + + for _, l := range p.Location { + for i, ln := range l.Line { + if ln.Function != nil { + l.Line[i].functionIDX = ln.Function.ID + } else { + l.Line[i].functionIDX = 0 + } + } + if l.Mapping != nil { + l.mappingIDX = l.Mapping.ID + } else { + l.mappingIDX = 0 + } + } + for _, f := range p.Function { + f.nameX = addString(strings, f.Name) + f.systemNameX = addString(strings, f.SystemName) + f.filenameX = addString(strings, f.Filename) + } + + p.dropFramesX = addString(strings, p.DropFrames) + p.keepFramesX = addString(strings, p.KeepFrames) + + if pt := p.PeriodType; pt != nil { + pt.typeX = addString(strings, pt.Type) + pt.unitX = addString(strings, pt.Unit) + } + + p.commentX = nil + for _, c := range p.Comments { + p.commentX = append(p.commentX, addString(strings, c)) + } + + p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) + + p.stringTable = make([]string, len(strings)) + for s, i := range strings { + p.stringTable[i] = s + } +} + +func (p *Profile) encode(b *buffer) { + for _, x := range p.SampleType { + encodeMessage(b, 1, x) + } + for _, x := range p.Sample { + encodeMessage(b, 2, x) + } + for _, x 
:= range p.Mapping { + encodeMessage(b, 3, x) + } + for _, x := range p.Location { + encodeMessage(b, 4, x) + } + for _, x := range p.Function { + encodeMessage(b, 5, x) + } + encodeStrings(b, 6, p.stringTable) + encodeInt64Opt(b, 7, p.dropFramesX) + encodeInt64Opt(b, 8, p.keepFramesX) + encodeInt64Opt(b, 9, p.TimeNanos) + encodeInt64Opt(b, 10, p.DurationNanos) + if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) { + encodeMessage(b, 11, p.PeriodType) + } + encodeInt64Opt(b, 12, p.Period) + encodeInt64s(b, 13, p.commentX) + encodeInt64(b, 14, p.defaultSampleTypeX) +} + +var profileDecoder = []decoder{ + nil, // 0 + // repeated ValueType sample_type = 1 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.SampleType = append(pp.SampleType, x) + return decodeMessage(b, x) + }, + // repeated Sample sample = 2 + func(b *buffer, m message) error { + x := new(Sample) + pp := m.(*Profile) + pp.Sample = append(pp.Sample, x) + return decodeMessage(b, x) + }, + // repeated Mapping mapping = 3 + func(b *buffer, m message) error { + x := new(Mapping) + pp := m.(*Profile) + pp.Mapping = append(pp.Mapping, x) + return decodeMessage(b, x) + }, + // repeated Location location = 4 + func(b *buffer, m message) error { + x := new(Location) + x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer + pp := m.(*Profile) + pp.Location = append(pp.Location, x) + err := decodeMessage(b, x) + var tmp []Line + x.Line = append(tmp, x.Line...) // Shrink to allocated size + return err + }, + // repeated Function function = 5 + func(b *buffer, m message) error { + x := new(Function) + pp := m.(*Profile) + pp.Function = append(pp.Function, x) + return decodeMessage(b, x) + }, + // repeated string string_table = 6 + func(b *buffer, m message) error { + err := decodeStrings(b, &m.(*Profile).stringTable) + if err != nil { + return err + } + if m.(*Profile).stringTable[0] != "" { + return errors.New("string_table[0] must be ''") + } + return nil + }, + // int64 drop_frames = 7 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) }, + // int64 keep_frames = 8 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) }, + // int64 time_nanos = 9 + func(b *buffer, m message) error { + if m.(*Profile).TimeNanos != 0 { + return errConcatProfile + } + return decodeInt64(b, &m.(*Profile).TimeNanos) + }, + // int64 duration_nanos = 10 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) }, + // ValueType period_type = 11 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.PeriodType = x + return decodeMessage(b, x) + }, + // int64 period = 12 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) }, + // repeated int64 comment = 13 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, + // int64 defaultSampleType = 14 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, +} + +// postDecode takes the unexported fields populated by decode (with +// suffix X) and populates the corresponding exported fields. +// The unexported fields are cleared up to facilitate testing. 
+func (p *Profile) postDecode() error { + var err error + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + mappingIds := make([]*Mapping, len(p.Mapping)+1) + for _, m := range p.Mapping { + m.File, err = getString(p.stringTable, &m.fileX, err) + m.BuildID, err = getString(p.stringTable, &m.buildIDX, err) + if m.ID < uint64(len(mappingIds)) { + mappingIds[m.ID] = m + } else { + mappings[m.ID] = m + } + } + + functions := make(map[uint64]*Function, len(p.Function)) + functionIds := make([]*Function, len(p.Function)+1) + for _, f := range p.Function { + f.Name, err = getString(p.stringTable, &f.nameX, err) + f.SystemName, err = getString(p.stringTable, &f.systemNameX, err) + f.Filename, err = getString(p.stringTable, &f.filenameX, err) + if f.ID < uint64(len(functionIds)) { + functionIds[f.ID] = f + } else { + functions[f.ID] = f + } + } + + locations := make(map[uint64]*Location, len(p.Location)) + locationIds := make([]*Location, len(p.Location)+1) + for _, l := range p.Location { + if id := l.mappingIDX; id < uint64(len(mappingIds)) { + l.Mapping = mappingIds[id] + } else { + l.Mapping = mappings[id] + } + l.mappingIDX = 0 + for i, ln := range l.Line { + if id := ln.functionIDX; id != 0 { + l.Line[i].functionIDX = 0 + if id < uint64(len(functionIds)) { + l.Line[i].Function = functionIds[id] + } else { + l.Line[i].Function = functions[id] + } + } + } + if l.ID < uint64(len(locationIds)) { + locationIds[l.ID] = l + } else { + locations[l.ID] = l + } + } + + for _, st := range p.SampleType { + st.Type, err = getString(p.stringTable, &st.typeX, err) + st.Unit, err = getString(p.stringTable, &st.unitX, err) + } + + for _, s := range p.Sample { + labels := make(map[string][]string, len(s.labelX)) + numLabels := make(map[string][]int64, len(s.labelX)) + numUnits := make(map[string][]string, len(s.labelX)) + for _, l := range s.labelX { + var key, value string + key, err = getString(p.stringTable, &l.keyX, err) + if l.strX != 0 { + value, err = getString(p.stringTable, &l.strX, err) + labels[key] = append(labels[key], value) + } else if l.numX != 0 || l.unitX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) + } + numLabels[key] = append(numLabels[key], l.numX) + } + } + if len(labels) > 0 { + s.Label = labels + } + if len(numLabels) > 0 { + s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } + } + s.NumUnit = numUnits + } + s.Location = make([]*Location, len(s.locationIDX)) + for i, lid := range s.locationIDX { + if lid < uint64(len(locationIds)) { + s.Location[i] = locationIds[lid] + } else { + s.Location[i] = locations[lid] + } + } + s.locationIDX = nil + } + + p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err) + p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err) + + if pt := p.PeriodType; pt == nil { + p.PeriodType = &ValueType{} + } + + if pt := p.PeriodType; pt != nil { + pt.Type, err = getString(p.stringTable, &pt.typeX, err) + pt.Unit, err = getString(p.stringTable, &pt.unitX, err) + } + + for _, i := range p.commentX { + var c string + c, err = getString(p.stringTable, &i, err) + p.Comments = append(p.Comments, c) + } + + p.commentX = nil + p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) + p.stringTable = nil + return err +} + +// 
padStringArray pads arr with enough empty strings to make arr +// length l when arr's length is less than l. +func padStringArray(arr []string, l int) []string { + if l <= len(arr) { + return arr + } + return append(arr, make([]string, l-len(arr))...) +} + +func (p *ValueType) decoder() []decoder { + return valueTypeDecoder +} + +func (p *ValueType) encode(b *buffer) { + encodeInt64Opt(b, 1, p.typeX) + encodeInt64Opt(b, 2, p.unitX) +} + +var valueTypeDecoder = []decoder{ + nil, // 0 + // optional int64 type = 1 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) }, + // optional int64 unit = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) }, +} + +func (p *Sample) decoder() []decoder { + return sampleDecoder +} + +func (p *Sample) encode(b *buffer) { + encodeUint64s(b, 1, p.locationIDX) + encodeInt64s(b, 2, p.Value) + for _, x := range p.labelX { + encodeMessage(b, 3, x) + } +} + +var sampleDecoder = []decoder{ + nil, // 0 + // repeated uint64 location = 1 + func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) }, + // repeated int64 value = 2 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) }, + // repeated Label label = 3 + func(b *buffer, m message) error { + s := m.(*Sample) + n := len(s.labelX) + s.labelX = append(s.labelX, label{}) + return decodeMessage(b, &s.labelX[n]) + }, +} + +func (p label) decoder() []decoder { + return labelDecoder +} + +func (p label) encode(b *buffer) { + encodeInt64Opt(b, 1, p.keyX) + encodeInt64Opt(b, 2, p.strX) + encodeInt64Opt(b, 3, p.numX) + encodeInt64Opt(b, 4, p.unitX) +} + +var labelDecoder = []decoder{ + nil, // 0 + // optional int64 key = 1 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) }, + // optional int64 str = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) }, + // optional int64 num = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) }, + // optional int64 num = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) }, +} + +func (p *Mapping) decoder() []decoder { + return mappingDecoder +} + +func (p *Mapping) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeUint64Opt(b, 2, p.Start) + encodeUint64Opt(b, 3, p.Limit) + encodeUint64Opt(b, 4, p.Offset) + encodeInt64Opt(b, 5, p.fileX) + encodeInt64Opt(b, 6, p.buildIDX) + encodeBoolOpt(b, 7, p.HasFunctions) + encodeBoolOpt(b, 8, p.HasFilenames) + encodeBoolOpt(b, 9, p.HasLineNumbers) + encodeBoolOpt(b, 10, p.HasInlineFrames) +} + +var mappingDecoder = []decoder{ + nil, // 0 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) }, // optional uint64 id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) }, // optional uint64 memory_offset = 2 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) }, // optional uint64 memory_limit = 3 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) }, // optional uint64 file_offset = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) }, // optional int64 filename = 5 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) }, // optional int64 build_id = 6 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) }, // optional bool has_functions = 7 + func(b *buffer, m message) error { return decodeBool(b, 
&m.(*Mapping).HasFilenames) }, // optional bool has_filenames = 8 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10 +} + +func (p *Location) decoder() []decoder { + return locationDecoder +} + +func (p *Location) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeUint64Opt(b, 2, p.mappingIDX) + encodeUint64Opt(b, 3, p.Address) + for i := range p.Line { + encodeMessage(b, 4, &p.Line[i]) + } + encodeBoolOpt(b, 5, p.IsFolded) +} + +var locationDecoder = []decoder{ + nil, // 0 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1; + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2; + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3; + func(b *buffer, m message) error { // repeated Line line = 4 + pp := m.(*Location) + n := len(pp.Line) + pp.Line = append(pp.Line, Line{}) + return decodeMessage(b, &pp.Line[n]) + }, + func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5; +} + +func (p *Line) decoder() []decoder { + return lineDecoder +} + +func (p *Line) encode(b *buffer) { + encodeUint64Opt(b, 1, p.functionIDX) + encodeInt64Opt(b, 2, p.Line) +} + +var lineDecoder = []decoder{ + nil, // 0 + // optional uint64 function_id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, + // optional int64 line = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, +} + +func (p *Function) decoder() []decoder { + return functionDecoder +} + +func (p *Function) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeInt64Opt(b, 2, p.nameX) + encodeInt64Opt(b, 3, p.systemNameX) + encodeInt64Opt(b, 4, p.filenameX) + encodeInt64Opt(b, 5, p.StartLine) +} + +var functionDecoder = []decoder{ + nil, // 0 + // optional uint64 id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) }, + // optional int64 function_name = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) }, + // optional int64 function_system_name = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) }, + // repeated int64 filename = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) }, + // optional int64 start_line = 5 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) }, +} + +func addString(strings map[string]int, s string) int64 { + i, ok := strings[s] + if !ok { + i = len(strings) + strings[s] = i + } + return int64(i) +} + +func getString(strings []string, strng *int64, err error) (string, error) { + if err != nil { + return "", err + } + s := int(*strng) + if s < 0 || s >= len(strings) { + return "", errMalformed + } + *strng = 0 + return strings[s], nil +} diff --git a/src/vendor/github.com/google/pprof/profile/filter.go b/src/vendor/github.com/google/pprof/profile/filter.go new file mode 100644 index 00000000..ea8e66c6 --- /dev/null +++ b/src/vendor/github.com/google/pprof/profile/filter.go @@ -0,0 +1,270 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +// Implements methods to filter samples from profiles. + +import "regexp" + +// FilterSamplesByName filters the samples in a profile and only keeps +// samples where at least one frame matches focus but none match ignore. +// Returns true if the corresponding regexp matched at least one sample. +func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) { + focusOrIgnore := make(map[uint64]bool) + hidden := make(map[uint64]bool) + for _, l := range p.Location { + if ignore != nil && l.matchesName(ignore) { + im = true + focusOrIgnore[l.ID] = false + } else if focus == nil || l.matchesName(focus) { + fm = true + focusOrIgnore[l.ID] = true + } + + if hide != nil && l.matchesName(hide) { + hm = true + l.Line = l.unmatchedLines(hide) + if len(l.Line) == 0 { + hidden[l.ID] = true + } + } + if show != nil { + l.Line = l.matchedLines(show) + if len(l.Line) == 0 { + hidden[l.ID] = true + } else { + hnm = true + } + } + } + + s := make([]*Sample, 0, len(p.Sample)) + for _, sample := range p.Sample { + if focusedAndNotIgnored(sample.Location, focusOrIgnore) { + if len(hidden) > 0 { + var locs []*Location + for _, loc := range sample.Location { + if !hidden[loc.ID] { + locs = append(locs, loc) + } + } + if len(locs) == 0 { + // Remove sample with no locations (by not adding it to s). + continue + } + sample.Location = locs + } + s = append(s, sample) + } + } + p.Sample = s + + return +} + +// ShowFrom drops all stack frames above the highest matching frame and returns +// whether a match was found. If showFrom is nil it returns false and does not +// modify the profile. +// +// Example: consider a sample with frames [A, B, C, B], where A is the root. +// ShowFrom(nil) returns false and has frames [A, B, C, B]. +// ShowFrom(A) returns true and has frames [A, B, C, B]. +// ShowFrom(B) returns true and has frames [B, C, B]. +// ShowFrom(C) returns true and has frames [C, B]. +// ShowFrom(D) returns false and drops the sample because no frames remain. +func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) { + if showFrom == nil { + return false + } + // showFromLocs stores location IDs that matched ShowFrom. + showFromLocs := make(map[uint64]bool) + // Apply to locations. + for _, loc := range p.Location { + if filterShowFromLocation(loc, showFrom) { + showFromLocs[loc.ID] = true + matched = true + } + } + // For all samples, strip locations after the highest matching one. + s := make([]*Sample, 0, len(p.Sample)) + for _, sample := range p.Sample { + for i := len(sample.Location) - 1; i >= 0; i-- { + if showFromLocs[sample.Location[i].ID] { + sample.Location = sample.Location[:i+1] + s = append(s, sample) + break + } + } + } + p.Sample = s + return matched +} + +// filterShowFromLocation tests a showFrom regex against a location, removes +// lines after the last match and returns whether a match was found. If the +// mapping is matched, then all lines are kept.
+func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool { + if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) { + return true + } + if i := loc.lastMatchedLineIndex(showFrom); i >= 0 { + loc.Line = loc.Line[:i+1] + return true + } + return false +} + +// lastMatchedLineIndex returns the index of the last line that matches a regex, +// or -1 if no match is found. +func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int { + for i := len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return i + } + } + } + return -1 +} + +// FilterTagsByName filters the tags in a profile and only keeps +// tags that match show and not hide. +func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) { + matchRemove := func(name string) bool { + matchShow := show == nil || show.MatchString(name) + matchHide := hide != nil && hide.MatchString(name) + + if matchShow { + sm = true + } + if matchHide { + hm = true + } + return !matchShow || matchHide + } + for _, s := range p.Sample { + for lab := range s.Label { + if matchRemove(lab) { + delete(s.Label, lab) + } + } + for lab := range s.NumLabel { + if matchRemove(lab) { + delete(s.NumLabel, lab) + } + } + } + return +} + +// matchesName returns whether the location matches the regular +// expression. It checks any available function names, file names, and +// mapping object filename. +func (loc *Location) matchesName(re *regexp.Regexp) bool { + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return true + } + } + } + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return true + } + return false +} + +// unmatchedLines returns the lines in the location that do not match +// the regular expression. +func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return nil + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// matchedLines returns the lines in the location that match +// the regular expression. +func (loc *Location) matchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return loc.Line + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// focusedAndNotIgnored looks up a slice of ids against a map of +// focused/ignored locations. The map only contains locations that are +// explicitly focused or ignored. Returns whether there is at least +// one focused location but no ignored locations. +func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool { + var f bool + for _, loc := range locs { + if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore { + if focus { + // Found focused location. Must keep searching in case there + // is an ignored one as well. + f = true + } else { + // Found ignored location. Can return false right away. 
+ return false + } + } + } + return f +} + +// TagMatch selects tags for filtering +type TagMatch func(s *Sample) bool + +// FilterSamplesByTag removes all samples from the profile, except +// those that match the focus predicate and do not match the ignore +// predicate. +func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) { + samples := make([]*Sample, 0, len(p.Sample)) + for _, s := range p.Sample { + focused, ignored := true, false + if focus != nil { + focused = focus(s) + } + if ignore != nil { + ignored = ignore(s) + } + fm = fm || focused + im = im || ignored + if focused && !ignored { + samples = append(samples, s) + } + } + p.Sample = samples + return +} diff --git a/src/vendor/github.com/google/pprof/profile/index.go b/src/vendor/github.com/google/pprof/profile/index.go new file mode 100644 index 00000000..bef1d604 --- /dev/null +++ b/src/vendor/github.com/google/pprof/profile/index.go @@ -0,0 +1,64 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "strconv" + "strings" +) + +// SampleIndexByName returns the appropriate index for a value of sample index. +// If numeric, it returns the number, otherwise it looks up the text in the +// profile sample types. +func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) { + if sampleIndex == "" { + if dst := p.DefaultSampleType; dst != "" { + for i, t := range sampleTypes(p) { + if t == dst { + return i, nil + } + } + } + // By default select the last sample value + return len(p.SampleType) - 1, nil + } + if i, err := strconv.Atoi(sampleIndex); err == nil { + if i < 0 || i >= len(p.SampleType) { + return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1) + } + return i, nil + } + + // Remove the inuse_ prefix to support legacy pprof options + // "inuse_space" and "inuse_objects" for profiles containing types + // "space" and "objects". + noInuse := strings.TrimPrefix(sampleIndex, "inuse_") + for i, t := range p.SampleType { + if t.Type == sampleIndex || t.Type == noInuse { + return i, nil + } + } + + return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p)) +} + +func sampleTypes(p *Profile) []string { + types := make([]string, len(p.SampleType)) + for i, t := range p.SampleType { + types[i] = t.Type + } + return types +} diff --git a/src/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/src/vendor/github.com/google/pprof/profile/legacy_java_profile.go new file mode 100644 index 00000000..91f45e53 --- /dev/null +++ b/src/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -0,0 +1,315 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert java legacy profiles into +// the profile.proto format. + +package profile + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +var ( + attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`) + javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`) + javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`) + javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`) + javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`) +) + +// javaCPUProfile returns a new Profile from profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}}, + } + var err error + var locs map[uint64]*Location + if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil { + return nil, err + } + + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaProfile returns a new profile from heapz or contentionz +// data. b is the profile bytes after the header. +func parseJavaProfile(b []byte) (*Profile, error) { + h := bytes.SplitAfterN(b, []byte("\n"), 2) + if len(h) < 2 { + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{}, + } + header := string(bytes.TrimSpace(h[0])) + + var err error + var pType string + switch header { + case "--- heapz 1 ---": + pType = "heap" + case "--- contentionz 1 ---": + pType = "contention" + default: + return nil, errUnrecognized + } + + if b, err = parseJavaHeader(pType, h[1], p); err != nil { + return nil, err + } + var locs map[uint64]*Location + if b, locs, err = parseJavaSamples(pType, b, p); err != nil { + return nil, err + } + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaHeader parses the attribute section on a java profile and +// populates a profile. Returns the remainder of the buffer after all +// attributes. +func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + h := attributeRx.FindStringSubmatch(line) + if h == nil { + // Not a valid attribute, exit. 
+ return b, nil + } + + attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2]) + var err error + switch pType + "/" + attribute { + case "heap/format", "cpu/format", "contention/format": + if value != "java" { + return nil, errUnrecognized + } + case "heap/resolution": + p.SampleType = []*ValueType{ + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: value}, + } + case "contention/resolution": + p.SampleType = []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: value}, + } + case "contention/sampling period": + p.PeriodType = &ValueType{ + Type: "contentions", Unit: "count", + } + if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + case "contention/ms since reset": + millis, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + p.DurationNanos = millis * 1000 * 1000 + default: + return nil, errUnrecognized + } + } + // Grab next line. + b = b[nextNewLine+1:] + nextNewLine = bytes.IndexByte(b, byte('\n')) + } + return b, nil +} + +// parseJavaSamples parses the samples from a java profile and +// populates the Samples in a profile. Returns the remainder of the +// buffer after the samples. +func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + locs := make(map[uint64]*Location) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + sample := javaSampleRx.FindStringSubmatch(line) + if sample == nil { + // Not a valid sample, exit. + return b, locs, nil + } + + // Java profiles have data/fields inverted compared to other + // profile types. + var err error + value1, value2, value3 := sample[2], sample[1], sample[3] + addrs, err := parseHexAddresses(value3) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + var sloc []*Location + for _, addr := range addrs { + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + s := &Sample{ + Value: make([]int64, 2), + Location: sloc, + } + + if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil { + return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) + } + if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil { + return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) + } + + switch pType { + case "heap": + const javaHeapzSamplingRate = 524288 // 512K + if s.Value[0] == 0 { + return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line) + } + s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}} + s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate) + case "contention": + if period := p.Period; period != 0 { + s.Value[0] = s.Value[0] * p.Period + s.Value[1] = s.Value[1] * p.Period + } + } + p.Sample = append(p.Sample, s) + } + // Grab next line. + b = b[nextNewLine+1:] + nextNewLine = bytes.IndexByte(b, byte('\n')) + } + return b, locs, nil +} + +// parseJavaLocations parses the location information in a java +// profile and populates the Locations in a profile. It uses the +// location addresses from the profile as the ID of each +// location.
+func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error { + r := bytes.NewBuffer(b) + fns := make(map[string]*Function) + for { + line, err := r.ReadString('\n') + if err != nil { + if err != io.EOF { + return err + } + if line == "" { + break + } + } + + if line = strings.TrimSpace(line); line == "" { + continue + } + + jloc := javaLocationRx.FindStringSubmatch(line) + if len(jloc) != 3 { + continue + } + addr, err := strconv.ParseUint(jloc[1], 16, 64) + if err != nil { + return fmt.Errorf("parsing sample %s: %v", line, err) + } + loc := locs[addr] + if loc == nil { + // Unused/unseen + continue + } + var lineFunc, lineFile string + var lineNo int64 + + if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 { + // Found a line of the form: "function (file:line)" + lineFunc, lineFile = fileLine[1], fileLine[2] + if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 { + lineNo = n + } + } else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 { + // If there's not a file:line, it's a shared library path. + // The path isn't interesting, so just give the .so. + lineFunc, lineFile = filePath[1], filepath.Base(filePath[2]) + } else if strings.Contains(jloc[2], "generated stub/JIT") { + lineFunc = "STUB" + } else { + // Treat whole line as the function name. This is used by the + // java agent for internal states such as "GC" or "VM". + lineFunc = jloc[2] + } + fn := fns[lineFunc] + + if fn == nil { + fn = &Function{ + Name: lineFunc, + SystemName: lineFunc, + Filename: lineFile, + } + fns[lineFunc] = fn + p.Function = append(p.Function, fn) + } + loc.Line = []Line{ + { + Function: fn, + Line: lineNo, + }, + } + loc.Address = 0 + } + + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + + return nil +} diff --git a/src/vendor/github.com/google/pprof/profile/legacy_profile.go b/src/vendor/github.com/google/pprof/profile/legacy_profile.go new file mode 100644 index 00000000..0c8f3bb5 --- /dev/null +++ b/src/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -0,0 +1,1225 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert legacy profiles into the +// profile.proto format. 
+ +package profile + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "regexp" + "strconv" + "strings" +) + +var ( + countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`) + countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`) + + heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`) + heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`) + + contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`) + + hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`) + + growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`) + + fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`) + + threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`) + threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`) + + // Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools. + // Recommended format: + // Start End object file name offset(optional) linker build id + // 0x40000-0x80000 /path/to/binary (@FF00) abc123456 + spaceDigits = `\s+[[:digit:]]+` + hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+` + oSpace = `\s*` + // Capturing expressions. + cHex = `(?:0x)?([[:xdigit:]]+)` + cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?` + cSpaceString = `(?:\s+(\S+))?` + cSpaceHex = `(?:\s+([[:xdigit:]]+))?` + cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?` + cPerm = `(?:\s+([-rwxp]+))?` + + procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString) + briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex) + + // Regular expression to parse log data, of the form: + // ... file:line] msg... + logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`) +) + +func isSpaceOrComment(line string) bool { + trimmed := strings.TrimSpace(line) + return len(trimmed) == 0 || trimmed[0] == '#' +} + +// parseGoCount parses a Go count profile (e.g., threadcreate or +// goroutine) and returns a new Profile. +func parseGoCount(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip comments at the beginning of the file. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + if err := s.Err(); err != nil { + return nil, err + } + m := countStartRE.FindStringSubmatch(s.Text()) + if m == nil { + return nil, errUnrecognized + } + profileType := m[1] + p := &Profile{ + PeriodType: &ValueType{Type: profileType, Unit: "count"}, + Period: 1, + SampleType: []*ValueType{{Type: profileType, Unit: "count"}}, + } + locations := make(map[uint64]*Location) + for s.Scan() { + line := s.Text() + if isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + m := countRE.FindStringSubmatch(line) + if m == nil { + return nil, errMalformed + } + n, err := strconv.ParseInt(m[1], 0, 64) + if err != nil { + return nil, errMalformed + } + fields := strings.Fields(m[2]) + locs := make([]*Location, 0, len(fields)) + for _, stk := range fields { + addr, err := strconv.ParseUint(stk, 0, 64) + if err != nil { + return nil, errMalformed + } + // Adjust all frames by -1 to land on top of the call instruction. 
+ addr-- + loc := locations[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locations[addr] = loc + p.Location = append(p.Location, loc) + } + locs = append(locs, loc) + } + p.Sample = append(p.Sample, &Sample{ + Location: locs, + Value: []int64{n}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +// remapLocationIDs ensures there is a location for each address +// referenced by a sample, and remaps the samples to point to the new +// location ids. +func (p *Profile) remapLocationIDs() { + seen := make(map[*Location]bool, len(p.Location)) + var locs []*Location + + for _, s := range p.Sample { + for _, l := range s.Location { + if seen[l] { + continue + } + l.ID = uint64(len(locs) + 1) + locs = append(locs, l) + seen[l] = true + } + } + p.Location = locs +} + +func (p *Profile) remapFunctionIDs() { + seen := make(map[*Function]bool, len(p.Function)) + var fns []*Function + + for _, l := range p.Location { + for _, ln := range l.Line { + fn := ln.Function + if fn == nil || seen[fn] { + continue + } + fn.ID = uint64(len(fns) + 1) + fns = append(fns, fn) + seen[fn] = true + } + } + p.Function = fns +} + +// remapMappingIDs matches location addresses with existing mappings +// and updates them appropriately. This is O(N*M), if this ever shows +// up as a bottleneck, evaluate sorting the mappings and doing a +// binary search, which would make it O(N*log(M)). +func (p *Profile) remapMappingIDs() { + // Some profile handlers will incorrectly set regions for the main + // executable if its section is remapped. Fix them through heuristics. + + if len(p.Mapping) > 0 { + // Remove the initial mapping if named '/anon_hugepage' and has a + // consecutive adjacent mapping. + if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") { + if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start { + p.Mapping = p.Mapping[1:] + } + } + } + + // Subtract the offset from the start of the main mapping if it + // ends up at a recognizable start address. + if len(p.Mapping) > 0 { + const expectedStart = 0x400000 + if m := p.Mapping[0]; m.Start-m.Offset == expectedStart { + m.Start = expectedStart + m.Offset = 0 + } + } + + // Associate each location with an address to the corresponding + // mapping. Create fake mapping if a suitable one isn't found. + var fake *Mapping +nextLocation: + for _, l := range p.Location { + a := l.Address + if l.Mapping != nil || a == 0 { + continue + } + for _, m := range p.Mapping { + if m.Start <= a && a < m.Limit { + l.Mapping = m + continue nextLocation + } + } + // Work around legacy handlers failing to encode the first + // part of mappings split into adjacent ranges. + for _, m := range p.Mapping { + if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start { + m.Start -= m.Offset + m.Offset = 0 + l.Mapping = m + continue nextLocation + } + } + // If there is still no mapping, create a fake one. + // This is important for the Go legacy handler, which produced + // no mappings. + if fake == nil { + fake = &Mapping{ + ID: 1, + Limit: ^uint64(0), + } + p.Mapping = append(p.Mapping, fake) + } + l.Mapping = fake + } + + // Reset all mapping IDs. 
+ for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +var cpuInts = []func([]byte) (uint64, []byte){ + get32l, + get32b, + get64l, + get64b, +} + +func get32l(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:] +} + +func get32b(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:] +} + +func get64l(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:] +} + +func get64b(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:] +} + +// parseCPU parses a profilez legacy profile and returns a newly +// populated Profile. +// +// The general format for profilez samples is a sequence of words in +// binary format. The first words are a header with the following data: +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding. +func parseCPU(b []byte) (*Profile, error) { + var parse func([]byte) (uint64, []byte) + var n1, n2, n3, n4, n5 uint64 + for _, parse = range cpuInts { + var tmp []byte + n1, tmp = parse(b) + n2, tmp = parse(tmp) + n3, tmp = parse(tmp) + n4, tmp = parse(tmp) + n5, tmp = parse(tmp) + + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 { + b = tmp + return cpuProfile(b, int64(n4), parse) + } + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 { + b = tmp + return javaCPUProfile(b, int64(n4), parse) + } + } + return nil, errUnrecognized +} + +// cpuProfile returns a new Profile from C++ profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "nanoseconds"}, + }, + } + var err error + if b, _, err = parseCPUSamples(b, parse, true, p); err != nil { + return nil, err + } + + // If *most* samples have the same second-to-the-bottom frame, it + // strongly suggests that it is an uninteresting artifact of + // measurement -- a stack frame pushed by the signal handler. The + // bottom frame is always correct as it is picked up from the signal + // structure, not the stack. Check if this is the case and if so, + // remove. + + // Remove up to two frames. + maxiter := 2 + // Allow one different sample for this many samples with the same + // second-to-last frame. 
+ similarSamples := 32 + margin := len(p.Sample) / similarSamples + + for iter := 0; iter < maxiter; iter++ { + addr1 := make(map[uint64]int) + for _, s := range p.Sample { + if len(s.Location) > 1 { + a := s.Location[1].Address + addr1[a] = addr1[a] + 1 + } + } + + for id1, count := range addr1 { + if count >= len(p.Sample)-margin { + // Found uninteresting frame, strip it out from all samples + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[1].Address == id1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } + break + } + } + } + + if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +func cleanupDuplicateLocations(p *Profile) { + // The profile handler may duplicate the leaf frame, because it gets + // its address both from stack unwinding and from the signal + // context. Detect this and delete the duplicate, which has been + // adjusted by -1. The leaf address should not be adjusted as it is + // not a call. + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } +} + +// parseCPUSamples parses a collection of profilez samples from a +// profile. +// +// profilez samples are a repeated sequence of stack frames of the +// form: +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... +// StackSize + 2 -- The last address on the stack +// The last stack trace is of the form: +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 +// +// Addresses from stack traces may point to the next instruction after +// each call. Optionally adjust by -1 to land somewhere on the actual +// call (except for the leaf, which is not a call). +func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) { + locs := make(map[uint64]*Location) + for len(b) > 0 { + var count, nstk uint64 + count, b = parse(b) + nstk, b = parse(b) + if b == nil || nstk > uint64(len(b)/4) { + return nil, nil, errUnrecognized + } + var sloc []*Location + addrs := make([]uint64, nstk) + for i := 0; i < int(nstk); i++ { + addrs[i], b = parse(b) + } + + if count == 0 && nstk == 1 && addrs[0] == 0 { + // End of data marker + break + } + for i, addr := range addrs { + if adjust && i > 0 { + addr-- + } + loc := locs[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locs[addr] = loc + p.Location = append(p.Location, loc) + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, + &Sample{ + Value: []int64{int64(count), int64(count) * p.Period}, + Location: sloc, + }) + } + // Reached the end without finding the EOD marker. + return b, locs, nil +} + +// parseHeap parses a heapz legacy or a growthz profile and +// returns a newly populated Profile. 
+func parseHeap(b []byte) (p *Profile, err error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + p = &Profile{} + + sampling := "" + hasAlloc := false + + line := s.Text() + p.PeriodType = &ValueType{Type: "space", Unit: "bytes"} + if header := heapHeaderRE.FindStringSubmatch(line); header != nil { + sampling, p.Period, hasAlloc, err = parseHeapHeader(line) + if err != nil { + return nil, err + } + } else if header = growthHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else { + return nil, errUnrecognized + } + + if hasAlloc { + // Put alloc before inuse so that default pprof selection + // will prefer inuse_space. + p.SampleType = []*ValueType{ + {Type: "alloc_objects", Unit: "count"}, + {Type: "alloc_space", Unit: "bytes"}, + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: "bytes"}, + } + } else { + p.SampleType = []*ValueType{ + {Type: "objects", Unit: "count"}, + {Type: "space", Unit: "bytes"}, + } + } + + locs := make(map[uint64]*Location) + for s.Scan() { + line := strings.TrimSpace(s.Text()) + + if isSpaceOrComment(line) { + continue + } + + if isMemoryMapSentinel(line) { + break + } + + value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc) + if err != nil { + return nil, err + } + + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + NumLabel: map[string][]int64{"bytes": {blocksize}}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) { + header := heapHeaderRE.FindStringSubmatch(line) + if header == nil { + return "", 0, false, errUnrecognized + } + + if len(header[6]) > 0 { + if period, err = strconv.ParseInt(header[6], 10, 64); err != nil { + return "", 0, false, errUnrecognized + } + } + + if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") { + hasAlloc = true + } + + switch header[5] { + case "heapz_v2", "heap_v2": + return "v2", period, hasAlloc, nil + case "heapprofile": + return "", 1, hasAlloc, nil + case "heap": + return "v2", period / 2, hasAlloc, nil + default: + return "", 0, false, errUnrecognized + } +} + +// parseHeapSample parses a single row from a heap profile into a new Sample. +func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) { + sampleData := heapSampleRE.FindStringSubmatch(line) + if len(sampleData) != 6 { + return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData)) + } + + // This is a local-scoped helper function to avoid needing to pass + // around rate, sampling and many return parameters. 
+ addValues := func(countString, sizeString string, label string) error { + count, err := strconv.ParseInt(countString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + size, err := strconv.ParseInt(sizeString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + if count == 0 && size != 0 { + return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size) + } + if count != 0 { + blocksize = size / count + if sampling == "v2" { + count, size = scaleHeapSample(count, size, rate) + } + } + value = append(value, count, size) + return nil + } + + if includeAlloc { + if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil { + return nil, 0, nil, err + } + } + + if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil { + return nil, 0, nil, err + } + + addrs, err = parseHexAddresses(sampleData[5]) + if err != nil { + return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, blocksize, addrs, nil +} + +// parseHexAddresses extracts hex numbers from a string, attempts to convert +// each to an unsigned 64-bit number and returns the resulting numbers as a +// slice, or an error if the string contains hex numbers which are too large to +// handle (which means a malformed profile). +func parseHexAddresses(s string) ([]uint64, error) { + hexStrings := hexNumberRE.FindAllString(s, -1) + var addrs []uint64 + for _, s := range hexStrings { + if addr, err := strconv.ParseUint(s, 0, 64); err == nil { + addrs = append(addrs, addr) + } else { + return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s) + } + } + return addrs, nil +} + +// scaleHeapSample adjusts the data from a heapz Sample to +// account for its probability of appearing in the collected +// data. heapz profiles are a sampling of the memory allocations +// requests in a program. We estimate the unsampled value by dividing +// each collected sample by its probability of appearing in the +// profile. heapz v2 profiles rely on a poisson process to determine +// which samples to collect, based on the desired average collection +// rate R. The probability of a sample of size S to appear in that +// profile is 1-exp(-S/R). +func scaleHeapSample(count, size, rate int64) (int64, int64) { + if count == 0 || size == 0 { + return 0, 0 + } + + if rate <= 1 { + // if rate==1 all samples were collected so no adjustment is needed. + // if rate<1 treat as unknown and skip scaling. + return count, size + } + + avgSize := float64(size) / float64(count) + scale := 1 / (1 - math.Exp(-avgSize/float64(rate))) + + return int64(float64(count) * scale), int64(float64(size) * scale) +} + +// parseContention parses a mutex or contention profile. There are 2 cases: +// "--- contentionz " for legacy C++ profiles (and backwards compatibility) +// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime. 
+func parseContention(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + + switch l := s.Text(); { + case strings.HasPrefix(l, "--- contentionz "): + case strings.HasPrefix(l, "--- mutex:"): + case strings.HasPrefix(l, "--- contention:"): + default: + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{Type: "contentions", Unit: "count"}, + Period: 1, + SampleType: []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: "nanoseconds"}, + }, + } + + var cpuHz int64 + // Parse text of the form "attribute = value" before the samples. + const delimiter = "=" + for s.Scan() { + line := s.Text() + if line = strings.TrimSpace(line); isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + attr := strings.SplitN(line, delimiter, 2) + if len(attr) != 2 { + break + } + key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]) + var err error + switch key { + case "cycles/second": + if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "sampling period": + if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "ms since reset": + ms, err := strconv.ParseInt(val, 0, 64) + if err != nil { + return nil, errUnrecognized + } + p.DurationNanos = ms * 1000 * 1000 + case "format": + // CPP contentionz profiles don't have format. + return nil, errUnrecognized + case "resolution": + // CPP contentionz profiles don't have resolution. + return nil, errUnrecognized + case "discarded samples": + default: + return nil, errUnrecognized + } + } + if err := s.Err(); err != nil { + return nil, err + } + + locs := make(map[uint64]*Location) + for { + line := strings.TrimSpace(s.Text()) + if strings.HasPrefix(line, "---") { + break + } + if !isSpaceOrComment(line) { + value, addrs, err := parseContentionSample(line, p.Period, cpuHz) + if err != nil { + return nil, err + } + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + }) + } + if !s.Scan() { + break + } + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + return p, nil +} + +// parseContentionSample parses a single row from a contention profile +// into a new Sample. +func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) { + sampleData := contentionSampleRE.FindStringSubmatch(line) + if sampleData == nil { + return nil, nil, errUnrecognized + } + + v1, err := strconv.ParseInt(sampleData[1], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + v2, err := strconv.ParseInt(sampleData[2], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + // Unsample values if period and cpuHz are available. + // - Delays are scaled to cycles and then to nanoseconds. + // - Contentions are scaled to cycles. 
+ if period > 0 { + if cpuHz > 0 { + cpuGHz := float64(cpuHz) / 1e9 + v1 = int64(float64(v1) * float64(period) / cpuGHz) + } + v2 = v2 * period + } + + value = []int64{v2, v1} + addrs, err = parseHexAddresses(sampleData[3]) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, addrs, nil +} + +// parseThread parses a Threadz profile and returns a new Profile. +func parseThread(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip past comments and empty lines seeking a real header. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + + line := s.Text() + if m := threadzStartRE.FindStringSubmatch(line); m != nil { + // Advance over initial comments until first stack trace. + for s.Scan() { + if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") { + break + } + } + } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + p := &Profile{ + SampleType: []*ValueType{{Type: "thread", Unit: "count"}}, + PeriodType: &ValueType{Type: "thread", Unit: "count"}, + Period: 1, + } + + locs := make(map[uint64]*Location) + // Recognize each thread and populate profile samples. + for !isMemoryMapSentinel(line) { + if strings.HasPrefix(line, "---- no stack trace for") { + line = "" + break + } + if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + var addrs []uint64 + var err error + line, addrs, err = parseThreadSample(s) + if err != nil { + return nil, err + } + if len(addrs) == 0 { + // We got a --same as previous threads--. Bump counters. + if len(p.Sample) > 0 { + s := p.Sample[len(p.Sample)-1] + s.Value[0]++ + } + continue + } + + var sloc []*Location + for i, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call + // (except for the leaf, which is not a call). + if i > 0 { + addr-- + } + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: []int64{1}, + Location: sloc, + }) + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +// parseThreadSample parses a symbolized or unsymbolized stack trace. +// Returns the first line after the traceback, the sample (or nil if +// it hits a 'same-as-previous' marker) and an error. +func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) { + var line string + sameAsPrevious := false + for s.Scan() { + line = strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + if strings.HasPrefix(line, "---") { + break + } + if strings.Contains(line, "same as previous thread") { + sameAsPrevious = true + continue + } + + curAddrs, err := parseHexAddresses(line) + if err != nil { + return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + addrs = append(addrs, curAddrs...) + } + if err := s.Err(); err != nil { + return "", nil, err + } + if sameAsPrevious { + return line, nil, nil + } + return line, addrs, nil +} + +// parseAdditionalSections parses any additional sections in the +// profile, ignoring any unrecognized sections. 
+func parseAdditionalSections(s *bufio.Scanner, p *Profile) error { + for !isMemoryMapSentinel(s.Text()) && s.Scan() { + } + if err := s.Err(); err != nil { + return err + } + return p.ParseMemoryMapFromScanner(s) +} + +// ParseProcMaps parses a memory map in the format of /proc/self/maps. +// ParseMemoryMap should be called after setting on a profile to +// associate locations to the corresponding mapping based on their +// address. +func ParseProcMaps(rd io.Reader) ([]*Mapping, error) { + s := bufio.NewScanner(rd) + return parseProcMapsFromScanner(s) +} + +func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) { + var mapping []*Mapping + + var attrs []string + const delimiter = "=" + r := strings.NewReplacer() + for s.Scan() { + line := r.Replace(removeLoggingInfo(s.Text())) + m, err := parseMappingEntry(line) + if err != nil { + if err == errUnrecognized { + // Recognize assignments of the form: attr=value, and replace + // $attr with value on subsequent mappings. + if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 { + attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])) + r = strings.NewReplacer(attrs...) + } + // Ignore any unrecognized entries + continue + } + return nil, err + } + if m == nil { + continue + } + mapping = append(mapping, m) + } + if err := s.Err(); err != nil { + return nil, err + } + return mapping, nil +} + +// removeLoggingInfo detects and removes log prefix entries generated +// by the glog package. If no logging prefix is detected, the string +// is returned unmodified. +func removeLoggingInfo(line string) string { + if match := logInfoRE.FindStringIndex(line); match != nil { + return line[match[1]:] + } + return line +} + +// ParseMemoryMap parses a memory map in the format of +// /proc/self/maps, and overrides the mappings in the current profile. +// It renumbers the samples and locations in the profile correspondingly. +func (p *Profile) ParseMemoryMap(rd io.Reader) error { + return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd)) +} + +// ParseMemoryMapFromScanner parses a memory map in the format of +// /proc/self/maps or a variety of legacy format, and overrides the +// mappings in the current profile. It renumbers the samples and +// locations in the profile correspondingly. +func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error { + mapping, err := parseProcMapsFromScanner(s) + if err != nil { + return err + } + p.Mapping = append(p.Mapping, mapping...) + p.massageMappings() + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + return nil +} + +func parseMappingEntry(l string) (*Mapping, error) { + var start, end, perm, file, offset, buildID string + if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 { + start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5] + } else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 { + start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6] + } else { + return nil, errUnrecognized + } + + var err error + mapping := &Mapping{ + File: file, + BuildID: buildID, + } + if perm != "" && !strings.Contains(perm, "x") { + // Skip non-executable entries. 
+ return nil, nil + } + if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil { + return nil, errUnrecognized + } + if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil { + return nil, errUnrecognized + } + if offset != "" { + if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil { + return nil, errUnrecognized + } + } + return mapping, nil +} + +var memoryMapSentinels = []string{ + "--- Memory map: ---", + "MAPPED_LIBRARIES:", +} + +// isMemoryMapSentinel returns true if the string contains one of the +// known sentinels for memory map information. +func isMemoryMapSentinel(line string) bool { + for _, s := range memoryMapSentinels { + if strings.Contains(line, s) { + return true + } + } + return false +} + +func (p *Profile) addLegacyFrameInfo() { + switch { + case isProfileType(p, heapzSampleTypes): + p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr + case isProfileType(p, contentionzSampleTypes): + p.DropFrames, p.KeepFrames = lockRxStr, "" + default: + p.DropFrames, p.KeepFrames = cpuProfilerRxStr, "" + } +} + +var heapzSampleTypes = [][]string{ + {"allocations", "size"}, // early Go pprof profiles + {"objects", "space"}, + {"inuse_objects", "inuse_space"}, + {"alloc_objects", "alloc_space"}, + {"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles +} +var contentionzSampleTypes = [][]string{ + {"contentions", "delay"}, +} + +func isProfileType(p *Profile, types [][]string) bool { + st := p.SampleType +nextType: + for _, t := range types { + if len(st) != len(t) { + continue + } + + for i := range st { + if st[i].Type != t[i] { + continue nextType + } + } + return true + } + return false +} + +var allocRxStr = strings.Join([]string{ + // POSIX entry points. + `calloc`, + `cfree`, + `malloc`, + `free`, + `memalign`, + `do_memalign`, + `(__)?posix_memalign`, + `pvalloc`, + `valloc`, + `realloc`, + + // TC malloc. + `tcmalloc::.*`, + `tc_calloc`, + `tc_cfree`, + `tc_malloc`, + `tc_free`, + `tc_memalign`, + `tc_posix_memalign`, + `tc_pvalloc`, + `tc_valloc`, + `tc_realloc`, + `tc_new`, + `tc_delete`, + `tc_newarray`, + `tc_deletearray`, + `tc_new_nothrow`, + `tc_newarray_nothrow`, + + // Memory-allocation routines on OS X. + `malloc_zone_malloc`, + `malloc_zone_calloc`, + `malloc_zone_valloc`, + `malloc_zone_realloc`, + `malloc_zone_memalign`, + `malloc_zone_free`, + + // Go runtime + `runtime\..*`, + + // Other misc. memory allocation routines + `BaseArena::.*`, + `(::)?do_malloc_no_errno`, + `(::)?do_malloc_pages`, + `(::)?do_malloc`, + `DoSampledAllocation`, + `MallocedMemBlock::MallocedMemBlock`, + `_M_allocate`, + `__builtin_(vec_)?delete`, + `__builtin_(vec_)?new`, + `__gnu_cxx::new_allocator::allocate`, + `__libc_malloc`, + `__malloc_alloc_template::allocate`, + `allocate`, + `cpp_alloc`, + `operator new(\[\])?`, + `simple_alloc::allocate`, +}, `|`) + +var allocSkipRxStr = strings.Join([]string{ + // Preserve Go runtime frames that appear in the middle/bottom of + // the stack. 
+ `runtime\.panic`, + `runtime\.reflectcall`, + `runtime\.call[0-9]*`, +}, `|`) + +var cpuProfilerRxStr = strings.Join([]string{ + `ProfileData::Add`, + `ProfileData::prof_handler`, + `CpuProfiler::prof_handler`, + `__pthread_sighandler`, + `__restore`, +}, `|`) + +var lockRxStr = strings.Join([]string{ + `RecordLockProfileData`, + `(base::)?RecordLockProfileData.*`, + `(base::)?SubmitMutexProfileData.*`, + `(base::)?SubmitSpinLockProfileData.*`, + `(base::Mutex::)?AwaitCommon.*`, + `(base::Mutex::)?Unlock.*`, + `(base::Mutex::)?UnlockSlow.*`, + `(base::Mutex::)?ReaderUnlock.*`, + `(base::MutexLock::)?~MutexLock.*`, + `(Mutex::)?AwaitCommon.*`, + `(Mutex::)?Unlock.*`, + `(Mutex::)?UnlockSlow.*`, + `(Mutex::)?ReaderUnlock.*`, + `(MutexLock::)?~MutexLock.*`, + `(SpinLock::)?Unlock.*`, + `(SpinLock::)?SlowUnlock.*`, + `(SpinLockHolder::)?~SpinLockHolder.*`, +}, `|`) diff --git a/src/vendor/github.com/google/pprof/profile/merge.go b/src/vendor/github.com/google/pprof/profile/merge.go new file mode 100644 index 00000000..9978e733 --- /dev/null +++ b/src/vendor/github.com/google/pprof/profile/merge.go @@ -0,0 +1,481 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "sort" + "strconv" + "strings" +) + +// Compact performs garbage collection on a profile to remove any +// unreferenced fields. This is useful to reduce the size of a profile +// after samples or locations have been removed. +func (p *Profile) Compact() *Profile { + p, _ = Merge([]*Profile{p}) + return p +} + +// Merge merges all the profiles in profs into a single Profile. +// Returns a new profile independent of the input profiles. The merged +// profile is compacted to eliminate unused samples, locations, +// functions and mappings. Profiles must have identical profile sample +// and period types or the merge will fail. profile.Period of the +// resulting profile will be the maximum of all profiles, and +// profile.TimeNanos will be the earliest nonzero one. Merges are +// associative with the caveat of the first profile having some +// specialization in how headers are combined. There may be other +// subtleties now or in the future regarding associativity. 
+func Merge(srcs []*Profile) (*Profile, error) { + if len(srcs) == 0 { + return nil, fmt.Errorf("no profiles to merge") + } + p, err := combineHeaders(srcs) + if err != nil { + return nil, err + } + + pm := &profileMerger{ + p: p, + samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)), + locations: make(map[locationKey]*Location, len(srcs[0].Location)), + functions: make(map[functionKey]*Function, len(srcs[0].Function)), + mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)), + } + + for _, src := range srcs { + // Clear the profile-specific hash tables + pm.locationsByID = make(map[uint64]*Location, len(src.Location)) + pm.functionsByID = make(map[uint64]*Function, len(src.Function)) + pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) + + if len(pm.mappings) == 0 && len(src.Mapping) > 0 { + // The Mapping list has the property that the first mapping + // represents the main binary. Take the first Mapping we see, + // otherwise the operations below will add mappings in an + // arbitrary order. + pm.mapMapping(src.Mapping[0]) + } + + for _, s := range src.Sample { + if !isZeroSample(s) { + pm.mapSample(s) + } + } + } + + for _, s := range p.Sample { + if isZeroSample(s) { + // If there are any zero samples, re-merge the profile to GC + // them. + return Merge([]*Profile{p}) + } + } + + return p, nil +} + +// Normalize normalizes the source profile by multiplying each value in profile by the +// ratio of the sum of the base profile's values of that sample type to the sum of the +// source profile's value of that sample type. +func (p *Profile) Normalize(pb *Profile) error { + + if err := p.compatible(pb); err != nil { + return err + } + + baseVals := make([]int64, len(p.SampleType)) + for _, s := range pb.Sample { + for i, v := range s.Value { + baseVals[i] += v + } + } + + srcVals := make([]int64, len(p.SampleType)) + for _, s := range p.Sample { + for i, v := range s.Value { + srcVals[i] += v + } + } + + normScale := make([]float64, len(baseVals)) + for i := range baseVals { + if srcVals[i] == 0 { + normScale[i] = 0.0 + } else { + normScale[i] = float64(baseVals[i]) / float64(srcVals[i]) + } + } + p.ScaleN(normScale) + return nil +} + +func isZeroSample(s *Sample) bool { + for _, v := range s.Value { + if v != 0 { + return false + } + } + return true +} + +type profileMerger struct { + p *Profile + + // Memoization tables within a profile. + locationsByID map[uint64]*Location + functionsByID map[uint64]*Function + mappingsByID map[uint64]mapInfo + + // Memoization tables for profile entities. + samples map[sampleKey]*Sample + locations map[locationKey]*Location + functions map[functionKey]*Function + mappings map[mappingKey]*Mapping +} + +type mapInfo struct { + m *Mapping + offset int64 +} + +func (pm *profileMerger) mapSample(src *Sample) *Sample { + s := &Sample{ + Location: make([]*Location, len(src.Location)), + Value: make([]int64, len(src.Value)), + Label: make(map[string][]string, len(src.Label)), + NumLabel: make(map[string][]int64, len(src.NumLabel)), + NumUnit: make(map[string][]string, len(src.NumLabel)), + } + for i, l := range src.Location { + s.Location[i] = pm.mapLocation(l) + } + for k, v := range src.Label { + vv := make([]string, len(v)) + copy(vv, v) + s.Label[k] = vv + } + for k, v := range src.NumLabel { + u := src.NumUnit[k] + vv := make([]int64, len(v)) + uu := make([]string, len(u)) + copy(vv, v) + copy(uu, u) + s.NumLabel[k] = vv + s.NumUnit[k] = uu + } + // Check memoization table. 
Must be done on the remapped location to + // account for the remapped mapping. Add current values to the + // existing sample. + k := s.key() + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } + copy(s.Value, src.Value) + pm.samples[k] = s + pm.p.Sample = append(pm.p.Sample, s) + return s +} + +// key generates sampleKey to be used as a key for maps. +func (sample *Sample) key() sampleKey { + ids := make([]string, len(sample.Location)) + for i, l := range sample.Location { + ids[i] = strconv.FormatUint(l.ID, 16) + } + + labels := make([]string, 0, len(sample.Label)) + for k, v := range sample.Label { + labels = append(labels, fmt.Sprintf("%q%q", k, v)) + } + sort.Strings(labels) + + numlabels := make([]string, 0, len(sample.NumLabel)) + for k, v := range sample.NumLabel { + numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k])) + } + sort.Strings(numlabels) + + return sampleKey{ + strings.Join(ids, "|"), + strings.Join(labels, ""), + strings.Join(numlabels, ""), + } +} + +type sampleKey struct { + locations string + labels string + numlabels string +} + +func (pm *profileMerger) mapLocation(src *Location) *Location { + if src == nil { + return nil + } + + if l, ok := pm.locationsByID[src.ID]; ok { + return l + } + + mi := pm.mapMapping(src.Mapping) + l := &Location{ + ID: uint64(len(pm.p.Location) + 1), + Mapping: mi.m, + Address: uint64(int64(src.Address) + mi.offset), + Line: make([]Line, len(src.Line)), + IsFolded: src.IsFolded, + } + for i, ln := range src.Line { + l.Line[i] = pm.mapLine(ln) + } + // Check memoization table. Must be done on the remapped location to + // account for the remapped mapping ID. + k := l.key() + if ll, ok := pm.locations[k]; ok { + pm.locationsByID[src.ID] = ll + return ll + } + pm.locationsByID[src.ID] = l + pm.locations[k] = l + pm.p.Location = append(pm.p.Location, l) + return l +} + +// key generates locationKey to be used as a key for maps. +func (l *Location) key() locationKey { + key := locationKey{ + addr: l.Address, + isFolded: l.IsFolded, + } + if l.Mapping != nil { + // Normalizes address to handle address space randomization. + key.addr -= l.Mapping.Start + key.mappingID = l.Mapping.ID + } + lines := make([]string, len(l.Line)*2) + for i, line := range l.Line { + if line.Function != nil { + lines[i*2] = strconv.FormatUint(line.Function.ID, 16) + } + lines[i*2+1] = strconv.FormatInt(line.Line, 16) + } + key.lines = strings.Join(lines, "|") + return key +} + +type locationKey struct { + addr, mappingID uint64 + lines string + isFolded bool +} + +func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { + if src == nil { + return mapInfo{} + } + + if mi, ok := pm.mappingsByID[src.ID]; ok { + return mi + } + + // Check memoization tables. + mk := src.key() + if m, ok := pm.mappings[mk]; ok { + mi := mapInfo{m, int64(m.Start) - int64(src.Start)} + pm.mappingsByID[src.ID] = mi + return mi + } + m := &Mapping{ + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, + } + pm.p.Mapping = append(pm.p.Mapping, m) + + // Update memoization tables. + pm.mappings[mk] = m + mi := mapInfo{m, 0} + pm.mappingsByID[src.ID] = mi + return mi +} + +// key generates encoded strings of Mapping to be used as a key for +// maps. 
+func (m *Mapping) key() mappingKey { + // Normalize addresses to handle address space randomization. + // Round up to next 4K boundary to avoid minor discrepancies. + const mapsizeRounding = 0x1000 + + size := m.Limit - m.Start + size = size + mapsizeRounding - 1 + size = size - (size % mapsizeRounding) + key := mappingKey{ + size: size, + offset: m.Offset, + } + + switch { + case m.BuildID != "": + key.buildIDOrFile = m.BuildID + case m.File != "": + key.buildIDOrFile = m.File + default: + // A mapping containing neither build ID nor file name is a fake mapping. A + // key with empty buildIDOrFile is used for fake mappings so that they are + // treated as the same mapping during merging. + } + return key +} + +type mappingKey struct { + size, offset uint64 + buildIDOrFile string +} + +func (pm *profileMerger) mapLine(src Line) Line { + ln := Line{ + Function: pm.mapFunction(src.Function), + Line: src.Line, + } + return ln +} + +func (pm *profileMerger) mapFunction(src *Function) *Function { + if src == nil { + return nil + } + if f, ok := pm.functionsByID[src.ID]; ok { + return f + } + k := src.key() + if f, ok := pm.functions[k]; ok { + pm.functionsByID[src.ID] = f + return f + } + f := &Function{ + ID: uint64(len(pm.p.Function) + 1), + Name: src.Name, + SystemName: src.SystemName, + Filename: src.Filename, + StartLine: src.StartLine, + } + pm.functions[k] = f + pm.functionsByID[src.ID] = f + pm.p.Function = append(pm.p.Function, f) + return f +} + +// key generates a struct to be used as a key for maps. +func (f *Function) key() functionKey { + return functionKey{ + f.StartLine, + f.Name, + f.SystemName, + f.Filename, + } +} + +type functionKey struct { + startLine int64 + name, systemName, fileName string +} + +// combineHeaders checks that all profiles can be merged and returns +// their combined profile. +func combineHeaders(srcs []*Profile) (*Profile, error) { + for _, s := range srcs[1:] { + if err := srcs[0].compatible(s); err != nil { + return nil, err + } + } + + var timeNanos, durationNanos, period int64 + var comments []string + seenComments := map[string]bool{} + var defaultSampleType string + for _, s := range srcs { + if timeNanos == 0 || s.TimeNanos < timeNanos { + timeNanos = s.TimeNanos + } + durationNanos += s.DurationNanos + if period == 0 || period < s.Period { + period = s.Period + } + for _, c := range s.Comments { + if seen := seenComments[c]; !seen { + comments = append(comments, c) + seenComments[c] = true + } + } + if defaultSampleType == "" { + defaultSampleType = s.DefaultSampleType + } + } + + p := &Profile{ + SampleType: make([]*ValueType, len(srcs[0].SampleType)), + + DropFrames: srcs[0].DropFrames, + KeepFrames: srcs[0].KeepFrames, + + TimeNanos: timeNanos, + DurationNanos: durationNanos, + PeriodType: srcs[0].PeriodType, + Period: period, + + Comments: comments, + DefaultSampleType: defaultSampleType, + } + copy(p.SampleType, srcs[0].SampleType) + return p, nil +} + +// compatible determines if two profiles can be compared/merged. +// returns nil if the profiles are compatible; otherwise an error with +// details on the incompatibility. 
+func (p *Profile) compatible(pb *Profile) error { + if !equalValueType(p.PeriodType, pb.PeriodType) { + return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType) + } + + if len(p.SampleType) != len(pb.SampleType) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + + for i := range p.SampleType { + if !equalValueType(p.SampleType[i], pb.SampleType[i]) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + } + return nil +} + +// equalValueType returns true if the two value types are semantically +// equal. It ignores the internal fields used during encode/decode. +func equalValueType(st1, st2 *ValueType) bool { + return st1.Type == st2.Type && st1.Unit == st2.Unit +} diff --git a/src/vendor/github.com/google/pprof/profile/profile.go b/src/vendor/github.com/google/pprof/profile/profile.go new file mode 100644 index 00000000..2590c8dd --- /dev/null +++ b/src/vendor/github.com/google/pprof/profile/profile.go @@ -0,0 +1,805 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package profile provides a representation of profile.proto and +// methods to encode/decode profiles in this format. +package profile + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "math" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + "time" +) + +// Profile is an in-memory representation of profile.proto. +type Profile struct { + SampleType []*ValueType + DefaultSampleType string + Sample []*Sample + Mapping []*Mapping + Location []*Location + Function []*Function + Comments []string + + DropFrames string + KeepFrames string + + TimeNanos int64 + DurationNanos int64 + PeriodType *ValueType + Period int64 + + // The following fields are modified during encoding and copying, + // so are protected by a Mutex. 
+ encodeMu sync.Mutex + + commentX []int64 + dropFramesX int64 + keepFramesX int64 + stringTable []string + defaultSampleTypeX int64 +} + +// ValueType corresponds to Profile.ValueType +type ValueType struct { + Type string // cpu, wall, inuse_space, etc + Unit string // seconds, nanoseconds, bytes, etc + + typeX int64 + unitX int64 +} + +// Sample corresponds to Profile.Sample +type Sample struct { + Location []*Location + Value []int64 + Label map[string][]string + NumLabel map[string][]int64 + NumUnit map[string][]string + + locationIDX []uint64 + labelX []label +} + +// label corresponds to Profile.Label +type label struct { + keyX int64 + // Exactly one of the two following values must be set + strX int64 + numX int64 // Integer value for this label + // can be set if numX has value + unitX int64 +} + +// Mapping corresponds to Profile.Mapping +type Mapping struct { + ID uint64 + Start uint64 + Limit uint64 + Offset uint64 + File string + BuildID string + HasFunctions bool + HasFilenames bool + HasLineNumbers bool + HasInlineFrames bool + + fileX int64 + buildIDX int64 +} + +// Location corresponds to Profile.Location +type Location struct { + ID uint64 + Mapping *Mapping + Address uint64 + Line []Line + IsFolded bool + + mappingIDX uint64 +} + +// Line corresponds to Profile.Line +type Line struct { + Function *Function + Line int64 + + functionIDX uint64 +} + +// Function corresponds to Profile.Function +type Function struct { + ID uint64 + Name string + SystemName string + Filename string + StartLine int64 + + nameX int64 + systemNameX int64 + filenameX int64 +} + +// Parse parses a profile and checks for its validity. The input +// may be a gzip-compressed encoded protobuf or one of many legacy +// profile formats which may be unsupported in the future. +func Parse(r io.Reader) (*Profile, error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + return ParseData(data) +} + +// ParseData parses a profile from a buffer and checks for its +// validity. +func ParseData(data []byte) (*Profile, error) { + var p *Profile + var err error + if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err == nil { + data, err = ioutil.ReadAll(gz) + } + if err != nil { + return nil, fmt.Errorf("decompressing profile: %v", err) + } + } + if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile { + p, err = parseLegacy(data) + } + + if err != nil { + return nil, fmt.Errorf("parsing profile: %v", err) + } + + if err := p.CheckValid(); err != nil { + return nil, fmt.Errorf("malformed profile: %v", err) + } + return p, nil +} + +var errUnrecognized = fmt.Errorf("unrecognized profile format") +var errMalformed = fmt.Errorf("malformed profile format") +var errNoData = fmt.Errorf("empty input file") +var errConcatProfile = fmt.Errorf("concatenated profiles detected") + +func parseLegacy(data []byte) (*Profile, error) { + parsers := []func([]byte) (*Profile, error){ + parseCPU, + parseHeap, + parseGoCount, // goroutine, threadcreate + parseThread, + parseContention, + parseJavaProfile, + } + + for _, parser := range parsers { + p, err := parser(data) + if err == nil { + p.addLegacyFrameInfo() + return p, nil + } + if err != errUnrecognized { + return nil, err + } + } + return nil, errUnrecognized +} + +// ParseUncompressed parses an uncompressed protobuf into a profile. 
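For orientation, a minimal, hypothetical reader for the parsing entry points in this file (Parse, ParseData, and ParseUncompressed below); the file name is a placeholder and the snippet is not part of the vendored code.

```go
package main

import (
	"log"
	"os"

	"github.com/google/pprof/profile"
)

func main() {
	// "cpu.pprof" stands in for any gzip-compressed or raw profile.proto file,
	// e.g. the output of `go test -cpuprofile`.
	data, err := os.ReadFile("cpu.pprof")
	if err != nil {
		log.Fatal(err)
	}
	p, err := profile.ParseData(data) // detects gzip via the 0x1f 0x8b magic bytes
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("period: %d, samples: %d", p.Period, len(p.Sample))
}
```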
+func ParseUncompressed(data []byte) (*Profile, error) { + if len(data) == 0 { + return nil, errNoData + } + p := &Profile{} + if err := unmarshal(data, p); err != nil { + return nil, err + } + + if err := p.postDecode(); err != nil { + return nil, err + } + + return p, nil +} + +var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`) + +// massageMappings applies heuristic-based changes to the profile +// mappings to account for quirks of some environments. +func (p *Profile) massageMappings() { + // Merge adjacent regions with matching names, checking that the offsets match + if len(p.Mapping) > 1 { + mappings := []*Mapping{p.Mapping[0]} + for _, m := range p.Mapping[1:] { + lm := mappings[len(mappings)-1] + if adjacent(lm, m) { + lm.Limit = m.Limit + if m.File != "" { + lm.File = m.File + } + if m.BuildID != "" { + lm.BuildID = m.BuildID + } + p.updateLocationMapping(m, lm) + continue + } + mappings = append(mappings, m) + } + p.Mapping = mappings + } + + // Use heuristics to identify main binary and move it to the top of the list of mappings + for i, m := range p.Mapping { + file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1)) + if len(file) == 0 { + continue + } + if len(libRx.FindStringSubmatch(file)) > 0 { + continue + } + if file[0] == '[' { + continue + } + // Swap what we guess is main to position 0. + p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0] + break + } + + // Keep the mapping IDs neatly sorted + for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +// adjacent returns whether two mapping entries represent the same +// mapping that has been split into two. Check that their addresses are adjacent, +// and if the offsets match, if they are available. +func adjacent(m1, m2 *Mapping) bool { + if m1.File != "" && m2.File != "" { + if m1.File != m2.File { + return false + } + } + if m1.BuildID != "" && m2.BuildID != "" { + if m1.BuildID != m2.BuildID { + return false + } + } + if m1.Limit != m2.Start { + return false + } + if m1.Offset != 0 && m2.Offset != 0 { + offset := m1.Offset + (m1.Limit - m1.Start) + if offset != m2.Offset { + return false + } + } + return true +} + +func (p *Profile) updateLocationMapping(from, to *Mapping) { + for _, l := range p.Location { + if l.Mapping == from { + l.Mapping = to + } + } +} + +func serialize(p *Profile) []byte { + p.encodeMu.Lock() + p.preEncode() + b := marshal(p) + p.encodeMu.Unlock() + return b +} + +// Write writes the profile as a gzip-compressed marshaled protobuf. +func (p *Profile) Write(w io.Writer) error { + zw := gzip.NewWriter(w) + defer zw.Close() + _, err := zw.Write(serialize(p)) + return err +} + +// WriteUncompressed writes the profile as a marshaled protobuf. +func (p *Profile) WriteUncompressed(w io.Writer) error { + _, err := w.Write(serialize(p)) + return err +} + +// CheckValid tests whether the profile is valid. Checks include, but are +// not limited to: +// - len(Profile.Sample[n].value) == len(Profile.value_unit) +// - Sample.id has a corresponding Profile.Location +func (p *Profile) CheckValid() error { + // Check that sample values are consistent + sampleLen := len(p.SampleType) + if sampleLen == 0 && len(p.Sample) != 0 { + return fmt.Errorf("missing sample type information") + } + for _, s := range p.Sample { + if s == nil { + return fmt.Errorf("profile has nil sample") + } + if len(s.Value) != sampleLen { + return fmt.Errorf("mismatch: sample has %d values vs. 
%d types", len(s.Value), len(p.SampleType)) + } + for _, l := range s.Location { + if l == nil { + return fmt.Errorf("sample has nil location") + } + } + } + + // Check that all mappings/locations/functions are in the tables + // Check that there are no duplicate ids + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + for _, m := range p.Mapping { + if m == nil { + return fmt.Errorf("profile has nil mapping") + } + if m.ID == 0 { + return fmt.Errorf("found mapping with reserved ID=0") + } + if mappings[m.ID] != nil { + return fmt.Errorf("multiple mappings with same id: %d", m.ID) + } + mappings[m.ID] = m + } + functions := make(map[uint64]*Function, len(p.Function)) + for _, f := range p.Function { + if f == nil { + return fmt.Errorf("profile has nil function") + } + if f.ID == 0 { + return fmt.Errorf("found function with reserved ID=0") + } + if functions[f.ID] != nil { + return fmt.Errorf("multiple functions with same id: %d", f.ID) + } + functions[f.ID] = f + } + locations := make(map[uint64]*Location, len(p.Location)) + for _, l := range p.Location { + if l == nil { + return fmt.Errorf("profile has nil location") + } + if l.ID == 0 { + return fmt.Errorf("found location with reserved id=0") + } + if locations[l.ID] != nil { + return fmt.Errorf("multiple locations with same id: %d", l.ID) + } + locations[l.ID] = l + if m := l.Mapping; m != nil { + if m.ID == 0 || mappings[m.ID] != m { + return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID) + } + } + for _, ln := range l.Line { + f := ln.Function + if f == nil { + return fmt.Errorf("location id: %d has a line with nil function", l.ID) + } + if f.ID == 0 || functions[f.ID] != f { + return fmt.Errorf("inconsistent function %p: %d", f, f.ID) + } + } + } + return nil +} + +// Aggregate merges the locations in the profile into equivalence +// classes preserving the request attributes. It also updates the +// samples to point to the merged locations. +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { + for _, m := range p.Mapping { + m.HasInlineFrames = m.HasInlineFrames && inlineFrame + m.HasFunctions = m.HasFunctions && function + m.HasFilenames = m.HasFilenames && filename + m.HasLineNumbers = m.HasLineNumbers && linenumber + } + + // Aggregate functions + if !function || !filename { + for _, f := range p.Function { + if !function { + f.Name = "" + f.SystemName = "" + } + if !filename { + f.Filename = "" + } + } + } + + // Aggregate locations + if !inlineFrame || !address || !linenumber { + for _, l := range p.Location { + if !inlineFrame && len(l.Line) > 1 { + l.Line = l.Line[len(l.Line)-1:] + } + if !linenumber { + for i := range l.Line { + l.Line[i].Line = 0 + } + } + if !address { + l.Address = 0 + } + } + } + + return p.CheckValid() +} + +// NumLabelUnits returns a map of numeric label keys to the units +// associated with those keys and a map of those keys to any units +// that were encountered but not used. +// Unit for a given key is the first encountered unit for that key. If multiple +// units are encountered for values paired with a particular key, then the first +// unit encountered is used and all other units are returned in sorted order +// in map of ignored units. +// If no units are encountered for a particular key, the unit is then inferred +// based on the key. 
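A small, hypothetical example of the unit inference described above: explicit units win, and keys such as "request" fall back to "bytes" when no unit was recorded. The labels here are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/google/pprof/profile"
)

func main() {
	// One sample with two numeric labels: "duration" carries an explicit unit,
	// while "request" has none and is inferred from its key.
	p := &profile.Profile{
		Sample: []*profile.Sample{{
			NumLabel: map[string][]int64{"request": {4096}, "duration": {5}},
			NumUnit:  map[string][]string{"duration": {"ms"}},
		}},
	}
	units, ignored := p.NumLabelUnits()
	fmt.Println(units)   // map[duration:ms request:bytes]
	fmt.Println(ignored) // map[] (no conflicting units were seen)
}
```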
+func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) { + numLabelUnits := map[string]string{} + ignoredUnits := map[string]map[string]bool{} + encounteredKeys := map[string]bool{} + + // Determine units based on numeric tags for each sample. + for _, s := range p.Sample { + for k := range s.NumLabel { + encounteredKeys[k] = true + for _, unit := range s.NumUnit[k] { + if unit == "" { + continue + } + if wantUnit, ok := numLabelUnits[k]; !ok { + numLabelUnits[k] = unit + } else if wantUnit != unit { + if v, ok := ignoredUnits[k]; ok { + v[unit] = true + } else { + ignoredUnits[k] = map[string]bool{unit: true} + } + } + } + } + } + // Infer units for keys without any units associated with + // numeric tag values. + for key := range encounteredKeys { + unit := numLabelUnits[key] + if unit == "" { + switch key { + case "alignment", "request": + numLabelUnits[key] = "bytes" + default: + numLabelUnits[key] = key + } + } + } + + // Copy ignored units into more readable format + unitsIgnored := make(map[string][]string, len(ignoredUnits)) + for key, values := range ignoredUnits { + units := make([]string, len(values)) + i := 0 + for unit := range values { + units[i] = unit + i++ + } + sort.Strings(units) + unitsIgnored[key] = units + } + + return numLabelUnits, unitsIgnored +} + +// String dumps a text representation of a profile. Intended mainly +// for debugging purposes. +func (p *Profile) String() string { + ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location)) + for _, c := range p.Comments { + ss = append(ss, "Comment: "+c) + } + if pt := p.PeriodType; pt != nil { + ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) + } + ss = append(ss, fmt.Sprintf("Period: %d", p.Period)) + if p.TimeNanos != 0 { + ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos))) + } + if p.DurationNanos != 0 { + ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos))) + } + + ss = append(ss, "Samples:") + var sh1 string + for _, s := range p.SampleType { + dflt := "" + if s.Type == p.DefaultSampleType { + dflt = "[dflt]" + } + sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt) + } + ss = append(ss, strings.TrimSpace(sh1)) + for _, s := range p.Sample { + ss = append(ss, s.string()) + } + + ss = append(ss, "Locations") + for _, l := range p.Location { + ss = append(ss, l.string()) + } + + ss = append(ss, "Mappings") + for _, m := range p.Mapping { + ss = append(ss, m.string()) + } + + return strings.Join(ss, "\n") + "\n" +} + +// string dumps a text representation of a mapping. Intended mainly +// for debugging purposes. +func (m *Mapping) string() string { + bits := "" + if m.HasFunctions { + bits = bits + "[FN]" + } + if m.HasFilenames { + bits = bits + "[FL]" + } + if m.HasLineNumbers { + bits = bits + "[LN]" + } + if m.HasInlineFrames { + bits = bits + "[IN]" + } + return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s", + m.ID, + m.Start, m.Limit, m.Offset, + m.File, + m.BuildID, + bits) +} + +// string dumps a text representation of a location. Intended mainly +// for debugging purposes. +func (l *Location) string() string { + ss := []string{} + locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address) + if m := l.Mapping; m != nil { + locStr = locStr + fmt.Sprintf("M=%d ", m.ID) + } + if l.IsFolded { + locStr = locStr + "[F] " + } + if len(l.Line) == 0 { + ss = append(ss, locStr) + } + for li := range l.Line { + lnStr := "??" 
+ if fn := l.Line[li].Function; fn != nil { + lnStr = fmt.Sprintf("%s %s:%d s=%d", + fn.Name, + fn.Filename, + l.Line[li].Line, + fn.StartLine) + if fn.Name != fn.SystemName { + lnStr = lnStr + "(" + fn.SystemName + ")" + } + } + ss = append(ss, locStr+lnStr) + // Do not print location details past the first line + locStr = " " + } + return strings.Join(ss, "\n") +} + +// string dumps a text representation of a sample. Intended mainly +// for debugging purposes. +func (s *Sample) string() string { + ss := []string{} + var sv string + for _, v := range s.Value { + sv = fmt.Sprintf("%s %10d", sv, v) + } + sv = sv + ": " + for _, l := range s.Location { + sv = sv + fmt.Sprintf("%d ", l.ID) + } + ss = append(ss, sv) + const labelHeader = " " + if len(s.Label) > 0 { + ss = append(ss, labelHeader+labelsToString(s.Label)) + } + if len(s.NumLabel) > 0 { + ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit)) + } + return strings.Join(ss, "\n") +} + +// labelsToString returns a string representation of a +// map representing labels. +func labelsToString(labels map[string][]string) string { + ls := []string{} + for k, v := range labels { + ls = append(ls, fmt.Sprintf("%s:%v", k, v)) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// numLabelsToString returns a string representation of a map +// representing numeric labels. +func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string { + ls := []string{} + for k, v := range numLabels { + units := numUnits[k] + var labelString string + if len(units) == len(v) { + values := make([]string, len(v)) + for i, vv := range v { + values[i] = fmt.Sprintf("%d %s", vv, units[i]) + } + labelString = fmt.Sprintf("%s:%v", k, values) + } else { + labelString = fmt.Sprintf("%s:%v", k, v) + } + ls = append(ls, labelString) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// SetLabel sets the specified key to the specified value for all samples in the +// profile. +func (p *Profile) SetLabel(key string, value []string) { + for _, sample := range p.Sample { + if sample.Label == nil { + sample.Label = map[string][]string{key: value} + } else { + sample.Label[key] = value + } + } +} + +// RemoveLabel removes all labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveLabel(key string) { + for _, sample := range p.Sample { + delete(sample.Label, key) + } +} + +// HasLabel returns true if a sample has a label with indicated key and value. +func (s *Sample) HasLabel(key, value string) bool { + for _, v := range s.Label[key] { + if v == value { + return true + } + } + return false +} + +// DiffBaseSample returns true if a sample belongs to the diff base and false +// otherwise. +func (s *Sample) DiffBaseSample() bool { + return s.HasLabel("pprof::base", "true") +} + +// Scale multiplies all sample values in a profile by a constant and keeps +// only samples that have at least one non-zero value. +func (p *Profile) Scale(ratio float64) { + if ratio == 1 { + return + } + ratios := make([]float64, len(p.SampleType)) + for i := range p.SampleType { + ratios[i] = ratio + } + p.ScaleN(ratios) +} + +// ScaleN multiplies each sample values in a sample by a different amount +// and keeps only samples that have at least one non-zero value. 
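Before the ScaleN implementation that follows, a short hypothetical example of how Scale and ScaleN behave, including the dropping of samples whose values all round to zero; the profile is constructed inline purely for illustration.

```go
package main

import (
	"fmt"

	"github.com/google/pprof/profile"
)

func main() {
	p := &profile.Profile{
		SampleType: []*profile.ValueType{{Type: "samples", Unit: "count"}},
		Sample: []*profile.Sample{
			{Value: []int64{10}},
			{Value: []int64{1}},
		},
	}
	// Scale applies one ratio to every value.
	p.Scale(2)
	fmt.Println(p.Sample[0].Value, p.Sample[1].Value) // [20] [2]

	// ScaleN takes one ratio per sample type and drops samples whose values
	// all round to zero.
	if err := p.ScaleN([]float64{0.1}); err != nil {
		fmt.Println(err)
	}
	fmt.Println(len(p.Sample)) // 1 (the [2] sample rounded to zero and was dropped)
}
```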
+func (p *Profile) ScaleN(ratios []float64) error { + if len(p.SampleType) != len(ratios) { + return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType)) + } + allOnes := true + for _, r := range ratios { + if r != 1 { + allOnes = false + break + } + } + if allOnes { + return nil + } + fillIdx := 0 + for _, s := range p.Sample { + keepSample := false + for i, v := range s.Value { + if ratios[i] != 1 { + val := int64(math.Round(float64(v) * ratios[i])) + s.Value[i] = val + keepSample = keepSample || val != 0 + } + } + if keepSample { + p.Sample[fillIdx] = s + fillIdx++ + } + } + p.Sample = p.Sample[:fillIdx] + return nil +} + +// HasFunctions determines if all locations in this profile have +// symbolized function information. +func (p *Profile) HasFunctions() bool { + for _, l := range p.Location { + if l.Mapping != nil && !l.Mapping.HasFunctions { + return false + } + } + return true +} + +// HasFileLines determines if all locations in this profile have +// symbolized file and line number information. +func (p *Profile) HasFileLines() bool { + for _, l := range p.Location { + if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) { + return false + } + } + return true +} + +// Unsymbolizable returns true if a mapping points to a binary for which +// locations can't be symbolized in principle, at least now. Examples are +// "[vdso]", [vsyscall]" and some others, see the code. +func (m *Mapping) Unsymbolizable() bool { + name := filepath.Base(m.File) + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") +} + +// Copy makes a fully independent copy of a profile. +func (p *Profile) Copy() *Profile { + pp := &Profile{} + if err := unmarshal(serialize(p), pp); err != nil { + panic(err) + } + if err := pp.postDecode(); err != nil { + panic(err) + } + + return pp +} diff --git a/src/vendor/github.com/google/pprof/profile/proto.go b/src/vendor/github.com/google/pprof/profile/proto.go new file mode 100644 index 00000000..539ad3ab --- /dev/null +++ b/src/vendor/github.com/google/pprof/profile/proto.go @@ -0,0 +1,370 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file is a simple protocol buffer encoder and decoder. +// The format is described at +// https://developers.google.com/protocol-buffers/docs/encoding +// +// A protocol message must implement the message interface: +// decoder() []decoder +// encode(*buffer) +// +// The decode method returns a slice indexed by field number that gives the +// function to decode that field. +// The encode method encodes its receiver into the given buffer. +// +// The two methods are simple enough to be implemented by hand rather than +// by using a protocol compiler. +// +// See profile.go for examples of messages implementing this interface. +// +// There is no support for groups, message sets, or "has" bits. 
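The file header above refers to the protocol buffer wire encoding; the varint scheme it relies on can be illustrated in isolation. This standalone sketch does not use the unexported helpers defined in this file and re-implements the encoding only to show the byte layout.

```go
package main

import "fmt"

// putVarint encodes x in base-128 varint form: seven bits per byte, low bits
// first, with the high bit set on every byte except the last.
func putVarint(x uint64) []byte {
	var out []byte
	for x >= 128 {
		out = append(out, byte(x)|0x80)
		x >>= 7
	}
	return append(out, byte(x))
}

func main() {
	fmt.Printf("% x\n", putVarint(300)) // ac 02
	// A length-delimited field (wire type 2) with tag 1 is framed as the key
	// varint 1<<3|2 = 0x0a, followed by a varint length and the raw bytes.
	fmt.Printf("%x\n", uint64(1)<<3|2) // a
}
```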
+ +package profile + +import ( + "errors" + "fmt" +) + +type buffer struct { + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte +} + +type decoder func(*buffer, message) error + +type message interface { + decoder() []decoder + encode(*buffer) +} + +func marshal(m message) []byte { + var b buffer + m.encode(&b) + return b.data +} + +func encodeVarint(b *buffer, x uint64) { + for x >= 128 { + b.data = append(b.data, byte(x)|0x80) + x >>= 7 + } + b.data = append(b.data, byte(x)) +} + +func encodeLength(b *buffer, tag int, len int) { + encodeVarint(b, uint64(tag)<<3|2) + encodeVarint(b, uint64(len)) +} + +func encodeUint64(b *buffer, tag int, x uint64) { + // append varint to b.data + encodeVarint(b, uint64(tag)<<3) + encodeVarint(b, x) +} + +func encodeUint64s(b *buffer, tag int, x []uint64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, u) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeUint64(b, tag, u) + } +} + +func encodeUint64Opt(b *buffer, tag int, x uint64) { + if x == 0 { + return + } + encodeUint64(b, tag, x) +} + +func encodeInt64(b *buffer, tag int, x int64) { + u := uint64(x) + encodeUint64(b, tag, u) +} + +func encodeInt64s(b *buffer, tag int, x []int64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, uint64(u)) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeInt64(b, tag, u) + } +} + +func encodeInt64Opt(b *buffer, tag int, x int64) { + if x == 0 { + return + } + encodeInt64(b, tag, x) +} + +func encodeString(b *buffer, tag int, x string) { + encodeLength(b, tag, len(x)) + b.data = append(b.data, x...) 
+} + +func encodeStrings(b *buffer, tag int, x []string) { + for _, s := range x { + encodeString(b, tag, s) + } +} + +func encodeBool(b *buffer, tag int, x bool) { + if x { + encodeUint64(b, tag, 1) + } else { + encodeUint64(b, tag, 0) + } +} + +func encodeBoolOpt(b *buffer, tag int, x bool) { + if x { + encodeBool(b, tag, x) + } +} + +func encodeMessage(b *buffer, tag int, m message) { + n1 := len(b.data) + m.encode(b) + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) +} + +func unmarshal(data []byte, m message) (err error) { + b := buffer{data: data, typ: 2} + return decodeMessage(&b, m) +} + +func le64(p []byte) uint64 { + return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56 +} + +func le32(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 +} + +func decodeVarint(data []byte) (uint64, []byte, error) { + var u uint64 + for i := 0; ; i++ { + if i >= 10 || i >= len(data) { + return 0, nil, errors.New("bad varint") + } + u |= uint64(data[i]&0x7F) << uint(7*i) + if data[i]&0x80 == 0 { + return u, data[i+1:], nil + } + } +} + +func decodeField(b *buffer, data []byte) ([]byte, error) { + x, data, err := decodeVarint(data) + if err != nil { + return nil, err + } + b.field = int(x >> 3) + b.typ = int(x & 7) + b.data = nil + b.u64 = 0 + switch b.typ { + case 0: + b.u64, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + case 1: + if len(data) < 8 { + return nil, errors.New("not enough data") + } + b.u64 = le64(data[:8]) + data = data[8:] + case 2: + var n uint64 + n, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + if n > uint64(len(data)) { + return nil, errors.New("too much data") + } + b.data = data[:n] + data = data[n:] + case 5: + if len(data) < 4 { + return nil, errors.New("not enough data") + } + b.u64 = uint64(le32(data[:4])) + data = data[4:] + default: + return nil, fmt.Errorf("unknown wire type: %d", b.typ) + } + + return data, nil +} + +func checkType(b *buffer, typ int) error { + if b.typ != typ { + return errors.New("type mismatch") + } + return nil +} + +func decodeMessage(b *buffer, m message) error { + if err := checkType(b, 2); err != nil { + return err + } + dec := m.decoder() + data := b.data + for len(data) > 0 { + // pull varint field# + type + var err error + data, err = decodeField(b, data) + if err != nil { + return err + } + if b.field >= len(dec) || dec[b.field] == nil { + continue + } + if err := dec[b.field](b, m); err != nil { + return err + } + } + return nil +} + +func decodeInt64(b *buffer, x *int64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = int64(b.u64) + return nil +} + +func decodeInt64s(b *buffer, x *[]int64) error { + if b.typ == 2 { + // Packed encoding + data := b.data + tmp := make([]int64, 0, len(data)) // Maximally sized + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + tmp = append(tmp, int64(u)) + } + *x = append(*x, tmp...) 
+ return nil + } + var i int64 + if err := decodeInt64(b, &i); err != nil { + return err + } + *x = append(*x, i) + return nil +} + +func decodeUint64(b *buffer, x *uint64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = b.u64 + return nil +} + +func decodeUint64s(b *buffer, x *[]uint64) error { + if b.typ == 2 { + data := b.data + // Packed encoding + tmp := make([]uint64, 0, len(data)) // Maximally sized + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + tmp = append(tmp, u) + } + *x = append(*x, tmp...) + return nil + } + var u uint64 + if err := decodeUint64(b, &u); err != nil { + return err + } + *x = append(*x, u) + return nil +} + +func decodeString(b *buffer, x *string) error { + if err := checkType(b, 2); err != nil { + return err + } + *x = string(b.data) + return nil +} + +func decodeStrings(b *buffer, x *[]string) error { + var s string + if err := decodeString(b, &s); err != nil { + return err + } + *x = append(*x, s) + return nil +} + +func decodeBool(b *buffer, x *bool) error { + if err := checkType(b, 0); err != nil { + return err + } + if int64(b.u64) == 0 { + *x = false + } else { + *x = true + } + return nil +} diff --git a/src/vendor/github.com/google/pprof/profile/prune.go b/src/vendor/github.com/google/pprof/profile/prune.go new file mode 100644 index 00000000..02d21a81 --- /dev/null +++ b/src/vendor/github.com/google/pprof/profile/prune.go @@ -0,0 +1,178 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Implements methods to remove frames from profiles. + +package profile + +import ( + "fmt" + "regexp" + "strings" +) + +var ( + reservedNames = []string{"(anonymous namespace)", "operator()"} + bracketRx = func() *regexp.Regexp { + var quotedNames []string + for _, name := range append(reservedNames, "(") { + quotedNames = append(quotedNames, regexp.QuoteMeta(name)) + } + return regexp.MustCompile(strings.Join(quotedNames, "|")) + }() +) + +// simplifyFunc does some primitive simplification of function names. +func simplifyFunc(f string) string { + // Account for leading '.' on the PPC ELF v1 ABI. + funcName := strings.TrimPrefix(f, ".") + // Account for unsimplified names -- try to remove the argument list by trimming + // starting from the first '(', but skipping reserved names that have '('. + for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) { + foundReserved := false + for _, res := range reservedNames { + if funcName[ind[0]:ind[1]] == res { + foundReserved = true + break + } + } + if !foundReserved { + funcName = funcName[:ind[0]] + break + } + } + return funcName +} + +// Prune removes all nodes beneath a node matching dropRx, and not +// matching keepRx. If the root node of a Sample matches, the sample +// will have an empty stack. 
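As a hypothetical illustration of Prune and RemoveUninteresting (both defined just below), the drop/keep expressions here echo the DropFrames/KeepFrames split set up by addLegacyFrameInfo earlier in this patch; the helper name and patterns are made up.

```go
package main

import (
	"log"
	"regexp"

	"github.com/google/pprof/profile"
)

// pruneAllocFrames drops allocator and runtime frames from every stack while
// keeping panics, then applies the profile's own DropFrames/KeepFrames rules.
func pruneAllocFrames(p *profile.Profile) error {
	drop := regexp.MustCompile(`^(malloc|calloc|runtime\..*)$`)
	keep := regexp.MustCompile(`^(runtime\.panic)$`)
	p.Prune(drop, keep)
	return p.RemoveUninteresting()
}

func main() {
	p := &profile.Profile{} // in practice this would come from profile.Parse
	if err := pruneAllocFrames(p); err != nil {
		log.Fatal(err)
	}
}
```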
+func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { + prune := make(map[uint64]bool) + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + var i int + for i = len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + break + } + } + } + } + + if i >= 0 { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + + // Remove the matching location. + if i == len(loc.Line)-1 { + // Matched the top entry: prune the whole location. + prune[loc.ID] = true + } else { + loc.Line = loc.Line[i+1:] + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the root to the leaves to find the prune location. + // Do not prune frames before the first user frame, to avoid + // pruning everything. + foundUser := false + for i := len(sample.Location) - 1; i >= 0; i-- { + id := sample.Location[i].ID + if !prune[id] && !pruneBeneath[id] { + foundUser = true + continue + } + if !foundUser { + continue + } + if prune[id] { + sample.Location = sample.Location[i+1:] + break + } + if pruneBeneath[id] { + sample.Location = sample.Location[i:] + break + } + } + } +} + +// RemoveUninteresting prunes and elides profiles using built-in +// tables of uninteresting function names. +func (p *Profile) RemoveUninteresting() error { + var keep, drop *regexp.Regexp + var err error + + if p.DropFrames != "" { + if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err) + } + if p.KeepFrames != "" { + if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err) + } + } + p.Prune(drop, keep) + } + return nil +} + +// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself. +// +// Please see the example below to understand this method as well as +// the difference from Prune method. +// +// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline. +// +// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A. +// Prune(A, nil) returns [B,C,B,D] by removing A itself. +// +// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom. +// Prune(B, nil) returns [D] because a matching node is found by scanning from the root. +func (p *Profile) PruneFrom(dropRx *regexp.Regexp) { + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + for i := 0; i < len(loc.Line); i++ { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + loc.Line = loc.Line[i:] + break + } + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the bottom leaf to the root to find the prune location. 
+ for i, loc := range sample.Location { + if pruneBeneath[loc.ID] { + sample.Location = sample.Location[i:] + break + } + } + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/.travis.yml b/src/vendor/github.com/onsi/ginkgo/.travis.yml deleted file mode 100644 index ea0966d5..00000000 --- a/src/vendor/github.com/onsi/ginkgo/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: go -go: - - tip - - 1.16.x - - 1.15.x - -cache: - directories: - - $GOPATH/pkg/mod - -# allow internal package imports, necessary for forked repositories -go_import_path: github.com/onsi/ginkgo - -install: - - GO111MODULE="off" go get -v -t ./... - - GO111MODULE="off" go get golang.org/x/tools/cmd/cover - - GO111MODULE="off" go get github.com/onsi/gomega - - GO111MODULE="off" go install github.com/onsi/ginkgo/ginkgo - - export PATH=$GOPATH/bin:$PATH - -script: - - GO111MODULE="on" go mod tidy && git diff --exit-code go.mod go.sum - - go vet - - ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace diff --git a/src/vendor/github.com/onsi/ginkgo/README.md b/src/vendor/github.com/onsi/ginkgo/README.md deleted file mode 100644 index a25ca5e0..00000000 --- a/src/vendor/github.com/onsi/ginkgo/README.md +++ /dev/null @@ -1,169 +0,0 @@ -![Ginkgo: A Go BDD Testing Framework](https://onsi.github.io/ginkgo/images/ginkgo.png) - -[![test](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) - -Jump to the [docs](https://onsi.github.io/ginkgo/) | [δΈ­ζ–‡ζ–‡ζ‘£](https://ke-chain.github.io/ginkgodoc) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)! - -If you have a question, comment, bug report, feature request, etc. please open a GitHub issue, or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW). - -# Ginkgo 2.0 Release Candidate is available! - -An effort is underway to develop and deliver Ginkgo 2.0. The work is happening in the [ver2](https://github.com/onsi/ginkgo/tree/ver2) branch and a changelog and migration guide is being maintained on that branch [here](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md). Issue [#711](https://github.com/onsi/ginkgo/issues/711) is the central place for discussion. - -As described in the [changelog](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md) and [proposal](https://docs.google.com/document/d/1h28ZknXRsTLPNNiOjdHIO-F2toCzq4xoZDXbfYaBdoQ/edit#), Ginkgo 2.0 will clean up the Ginkgo codebase, deprecate and remove some v1 functionality, and add several new much-requested features. To help users get ready for the migration, Ginkgo v1 has started emitting deprecation warnings for features that will no longer be supported with links to documentation for how to migrate away from these features. If you have concerns or comments please chime in on [#711](https://github.com/onsi/ginkgo/issues/711). - -Please start exploring and using the V2 release! To get started follow the [Using the Release Candidate](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#using-the-beta) directions in the migration guide. - -## TLDR -Ginkgo builds on Go's `testing` package, allowing expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style tests. -It is typically (and optionally) paired with the [Gomega](https://github.com/onsi/gomega) matcher library. 
- -```go -Describe("the strings package", func() { - Context("strings.Contains()", func() { - When("the string contains the substring in the middle", func() { - It("returns `true`", func() { - Expect(strings.Contains("Ginkgo is awesome", "is")).To(BeTrue()) - }) - }) - }) -}) -``` - -## Feature List - -- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite) - -- Ginkgo allows you to write tests in Go using expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style: - - Nestable [`Describe`, `Context` and `When` container blocks](https://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context) - - [`BeforeEach` and `AfterEach` blocks](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown - - [`It` and `Specify` blocks](https://onsi.github.io/ginkgo/#individual-specs-it) that hold your assertions - - [`JustBeforeEach` blocks](https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern). - - [`BeforeSuite` and `AfterSuite` blocks](https://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite. - -- A comprehensive test runner that lets you: - - Mark specs as [pending](https://onsi.github.io/ginkgo/#pending-specs) - - [Focus](https://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line - - Run your tests in [random order](https://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order. - - Break up your test suite into parallel processes for straightforward [test parallelization](https://onsi.github.io/ginkgo/#parallel-specs) - -- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](https://onsi.github.io/ginkgo/#running-tests) and [generating](https://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples: - - `ginkgo -nodes=N` runs your tests in `N` parallel processes and print out coherent output in realtime - - `ginkgo -cover` runs your tests using Go's code coverage tool - - `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package - - `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression - - `ginkgo -r` runs all tests suites under the current directory - - `ginkgo -v` prints out identifying information for each tests just before it runs - - And much more: run `ginkgo help` for details! - - The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test` - -- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop! - -- Built-in support for testing [asynchronicity](https://onsi.github.io/ginkgo/#asynchronous-tests) - -- Built-in support for [benchmarking](https://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code. 
- -- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`. - -- [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`. - -- [Ginkgo tools for VSCode](https://marketplace.visualstudio.com/items?itemName=joselitofilho.ginkgotestexplorer): just use VSCode's extension installer to install `ginkgoTestExplorer`. - -- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](https://onsi.github.io/ginkgo/#third-party-integrations) for details. - -- A modular architecture that lets you easily: - - Write [custom reporters](https://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](https://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter). - - [Adapt an existing matcher library (or write your own!)](https://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo - -## [Gomega](https://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library - -Ginkgo is best paired with Gomega. Learn more about Gomega [here](https://onsi.github.io/gomega/) - -## [Agouti](https://github.com/sclevine/agouti): A Go Acceptance Testing Framework - -Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](https://agouti.org) - -## Getting Started - -You'll need the Go command-line tools. Follow the [installation instructions](https://golang.org/doc/install) if you don't have it installed. - -### Global installation -To install the Ginkgo command line interface: -```bash -go get -u github.com/onsi/ginkgo/ginkgo -``` -Note that this will install it to `$GOBIN`, which will need to be in the `$PATH` (or equivalent). Run `go help install` for more information. - -### Go module ["tools package"](https://github.com/golang/go/issues/25922): -Create (or update) a file called `tools/tools.go` with the following contents: -```go -// +build tools - -package tools - -import ( - _ "github.com/onsi/ginkgo/ginkgo" -) - -// This file imports packages that are used when running go generate, or used -// during the development process but not otherwise depended on by built code. -``` -The Ginkgo command can then be run via `go run github.com/onsi/ginkgo/ginkgo`. -This approach allows the version of Ginkgo to be maintained under source control for reproducible results, -and is well suited to automated test pipelines. - -### Bootstrapping -```bash -cd path/to/package/you/want/to/test - -ginkgo bootstrap # set up a new ginkgo suite -ginkgo generate # will create a sample test file. edit this file and add your tests then... - -go test # to run your tests - -ginkgo # also runs your tests - -``` - -## I'm new to Go: What are my testing options? - -Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set. - -With that said, it's great to know what your options are :) - -### What Go gives you out of the box - -Testing is a first class citizen in Go, however Go's built-in testing primitives are somewhat limited: The [testing](https://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library. 
- -### Matcher libraries for Go's XUnit style tests - -A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction: - -- [testify](https://github.com/stretchr/testify) -- [gocheck](https://labix.org/gocheck) - -You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](https://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests) - -### BDD style testing frameworks - -There are a handful of BDD-style testing frameworks written for Go. Here are a few: - -- [Ginkgo](https://github.com/onsi/ginkgo) ;) -- [GoConvey](https://github.com/smartystreets/goconvey) -- [Goblin](https://github.com/franela/goblin) -- [Mao](https://github.com/azer/mao) -- [Zen](https://github.com/pranavraja/zen) - -Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of Go testing libraries. - -Go explore! - -## License - -Ginkgo is MIT-Licensed - -## Contributing - -See [CONTRIBUTING.md](CONTRIBUTING.md) diff --git a/src/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/src/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go deleted file mode 100644 index ccd7685e..00000000 --- a/src/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go +++ /dev/null @@ -1,681 +0,0 @@ -/* -Ginkgo is a BDD-style testing framework for Golang - -The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/ - -Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega) - -Ginkgo on Github: http://github.com/onsi/ginkgo - -Ginkgo is MIT-Licensed -*/ -package ginkgo - -import ( - "flag" - "fmt" - "io" - "net/http" - "os" - "reflect" - "strings" - "time" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/internal/codelocation" - "github.com/onsi/ginkgo/internal/global" - "github.com/onsi/ginkgo/internal/remote" - "github.com/onsi/ginkgo/internal/testingtproxy" - "github.com/onsi/ginkgo/internal/writer" - "github.com/onsi/ginkgo/reporters" - "github.com/onsi/ginkgo/reporters/stenographer" - colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable" - "github.com/onsi/ginkgo/types" -) - -var deprecationTracker = types.NewDeprecationTracker() - -const GINKGO_VERSION = config.VERSION -const GINKGO_PANIC = ` -Your test failed. -Ginkgo panics to prevent subsequent assertions from running. -Normally Ginkgo rescues this panic so you shouldn't see it. - -But, if you make an assertion in a goroutine, Ginkgo can't capture the panic. -To circumvent this, you should call - - defer GinkgoRecover() - -at the top of the goroutine that caused this panic. -` - -func init() { - config.Flags(flag.CommandLine, "ginkgo", true) - GinkgoWriter = writer.New(os.Stdout) -} - -//GinkgoWriter implements an io.Writer -//When running in verbose mode any writes to GinkgoWriter will be immediately printed -//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen -//only if the current test fails. -var GinkgoWriter io.Writer - -//The interface by which Ginkgo receives *testing.T -type GinkgoTestingT interface { - Fail() -} - -//GinkgoRandomSeed returns the seed used to randomize spec execution order. It is -//useful for seeding your own pseudorandom number generators (PRNGs) to ensure -//consistent executions from run to run, where your tests contain variability (for -//example, when selecting random test data). 
-func GinkgoRandomSeed() int64 { - return config.GinkgoConfig.RandomSeed -} - -//GinkgoParallelNode is deprecated, use GinkgoParallelProcess instead -func GinkgoParallelNode() int { - deprecationTracker.TrackDeprecation(types.Deprecations.ParallelNode(), codelocation.New(1)) - return GinkgoParallelProcess() -} - -//GinkgoParallelProcess returns the parallel process number for the current ginkgo process -//The process number is 1-indexed -func GinkgoParallelProcess() int { - return config.GinkgoConfig.ParallelNode -} - -//Some matcher libraries or legacy codebases require a *testing.T -//GinkgoT implements an interface analogous to *testing.T and can be used if -//the library in question accepts *testing.T through an interface -// -// For example, with testify: -// assert.Equal(GinkgoT(), 123, 123, "they should be equal") -// -// Or with gomock: -// gomock.NewController(GinkgoT()) -// -// GinkgoT() takes an optional offset argument that can be used to get the -// correct line number associated with the failure. -func GinkgoT(optionalOffset ...int) GinkgoTInterface { - offset := 3 - if len(optionalOffset) > 0 { - offset = optionalOffset[0] - } - failedFunc := func() bool { - return CurrentGinkgoTestDescription().Failed - } - nameFunc := func() string { - return CurrentGinkgoTestDescription().FullTestText - } - return testingtproxy.New(GinkgoWriter, Fail, Skip, failedFunc, nameFunc, offset) -} - -//The interface returned by GinkgoT(). This covers most of the methods -//in the testing package's T. -type GinkgoTInterface interface { - Cleanup(func()) - Setenv(key, value string) - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - Fail() - FailNow() - Failed() bool - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Helper() - Log(args ...interface{}) - Logf(format string, args ...interface{}) - Name() string - Parallel() - Skip(args ...interface{}) - SkipNow() - Skipf(format string, args ...interface{}) - Skipped() bool - TempDir() string -} - -//Custom Ginkgo test reporters must implement the Reporter interface. -// -//The custom reporter is passed in a SuiteSummary when the suite begins and ends, -//and a SpecSummary just before a spec begins and just after a spec ends -type Reporter reporters.Reporter - -//Asynchronous specs are given a channel of the Done type. You must close or write to the channel -//to tell Ginkgo that your async test is done. -type Done chan<- interface{} - -//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription -// FullTestText: a concatenation of ComponentTexts and the TestText -// ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test -// TestText: the text in the actual It or Measure node -// IsMeasurement: true if the current test is a measurement -// FileName: the name of the file containing the current test -// LineNumber: the line number for the current test -// Failed: if the current test has failed, this will be true (useful in an AfterEach) -type GinkgoTestDescription struct { - FullTestText string - ComponentTexts []string - TestText string - - IsMeasurement bool - - FileName string - LineNumber int - - Failed bool - Duration time.Duration -} - -//CurrentGinkgoTestDescripton returns information about the current running test. 
-func CurrentGinkgoTestDescription() GinkgoTestDescription { - summary, ok := global.Suite.CurrentRunningSpecSummary() - if !ok { - return GinkgoTestDescription{} - } - - subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1] - - return GinkgoTestDescription{ - ComponentTexts: summary.ComponentTexts[1:], - FullTestText: strings.Join(summary.ComponentTexts[1:], " "), - TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1], - IsMeasurement: summary.IsMeasurement, - FileName: subjectCodeLocation.FileName, - LineNumber: subjectCodeLocation.LineNumber, - Failed: summary.HasFailureState(), - Duration: summary.RunTime, - } -} - -//Measurement tests receive a Benchmarker. -// -//You use the Time() function to time how long the passed in body function takes to run -//You use the RecordValue() function to track arbitrary numerical measurements. -//The RecordValueWithPrecision() function can be used alternatively to provide the unit -//and resolution of the numeric measurement. -//The optional info argument is passed to the test reporter and can be used to -// provide the measurement data to a custom reporter with context. -// -//See http://onsi.github.io/ginkgo/#benchmark_tests for more details -type Benchmarker interface { - Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) - RecordValue(name string, value float64, info ...interface{}) - RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) -} - -//RunSpecs is the entry point for the Ginkgo test runner. -//You must call this within a Golang testing TestX(t *testing.T) function. -// -//To bootstrap a test suite you can use the Ginkgo CLI: -// -// ginkgo bootstrap -func RunSpecs(t GinkgoTestingT, description string) bool { - specReporters := []Reporter{buildDefaultReporter()} - if config.DefaultReporterConfig.ReportFile != "" { - reportFile := config.DefaultReporterConfig.ReportFile - specReporters[0] = reporters.NewJUnitReporter(reportFile) - specReporters = append(specReporters, buildDefaultReporter()) - } - return runSpecsWithCustomReporters(t, description, specReporters) -} - -//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace -//RunSpecs() with this method. -func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { - deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter()) - specReporters = append(specReporters, buildDefaultReporter()) - return runSpecsWithCustomReporters(t, description, specReporters) -} - -//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace -//RunSpecs() with this method. 
Note that parallel tests will not work correctly without the default reporter -func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { - deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter()) - return runSpecsWithCustomReporters(t, description, specReporters) -} - -func runSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { - writer := GinkgoWriter.(*writer.Writer) - writer.SetStream(config.DefaultReporterConfig.Verbose) - reporters := make([]reporters.Reporter, len(specReporters)) - for i, reporter := range specReporters { - reporters[i] = reporter - } - passed, hasFocusedTests := global.Suite.Run(t, description, reporters, writer, config.GinkgoConfig) - - if deprecationTracker.DidTrackDeprecations() { - fmt.Fprintln(colorable.NewColorableStderr(), deprecationTracker.DeprecationsReport()) - } - - if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { - fmt.Println("PASS | FOCUSED") - os.Exit(types.GINKGO_FOCUS_EXIT_CODE) - } - return passed -} - -func buildDefaultReporter() Reporter { - remoteReportingServer := config.GinkgoConfig.StreamHost - if remoteReportingServer == "" { - stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout()) - return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer) - } else { - debugFile := "" - if config.GinkgoConfig.DebugParallel { - debugFile = fmt.Sprintf("ginkgo-node-%d.log", config.GinkgoConfig.ParallelNode) - } - return remote.NewForwardingReporter(config.DefaultReporterConfig, remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor(), GinkgoWriter.(*writer.Writer), debugFile) - } -} - -//Skip notifies Ginkgo that the current spec was skipped. -func Skip(message string, callerSkip ...int) { - skip := 0 - if len(callerSkip) > 0 { - skip = callerSkip[0] - } - - global.Failer.Skip(message, codelocation.New(skip+1)) - panic(GINKGO_PANIC) -} - -//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.) -func Fail(message string, callerSkip ...int) { - skip := 0 - if len(callerSkip) > 0 { - skip = callerSkip[0] - } - - global.Failer.Fail(message, codelocation.New(skip+1)) - panic(GINKGO_PANIC) -} - -//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail` -//Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that -//calls out to Gomega -// -//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent -//further assertions from running. This panic must be recovered. Ginkgo does this for you -//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...) -// -//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no -//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine. -func GinkgoRecover() { - e := recover() - if e != nil { - global.Failer.Panic(codelocation.New(1), e) - } -} - -//Describe blocks allow you to organize your specs. A Describe block can contain any number of -//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. -// -//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally -//equivalent. 
The difference is purely semantic -- you typically Describe the behavior of an object -//or method and, within that Describe, outline a number of Contexts and Whens. -func Describe(text string, body func()) bool { - global.Suite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) - return true -} - -//You can focus the tests within a describe block using FDescribe -func FDescribe(text string, body func()) bool { - global.Suite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1)) - return true -} - -//You can mark the tests within a describe block as pending using PDescribe -func PDescribe(text string, body func()) bool { - global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) - return true -} - -//You can mark the tests within a describe block as pending using XDescribe -func XDescribe(text string, body func()) bool { - global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) - return true -} - -//Context blocks allow you to organize your specs. A Context block can contain any number of -//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. -// -//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally -//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object -//or method and, within that Describe, outline a number of Contexts and Whens. -func Context(text string, body func()) bool { - global.Suite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) - return true -} - -//You can focus the tests within a describe block using FContext -func FContext(text string, body func()) bool { - global.Suite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1)) - return true -} - -//You can mark the tests within a describe block as pending using PContext -func PContext(text string, body func()) bool { - global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) - return true -} - -//You can mark the tests within a describe block as pending using XContext -func XContext(text string, body func()) bool { - global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) - return true -} - -//When blocks allow you to organize your specs. A When block can contain any number of -//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. -// -//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally -//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object -//or method and, within that Describe, outline a number of Contexts and Whens. 
-func When(text string, body func()) bool { - global.Suite.PushContainerNode("when "+text, body, types.FlagTypeNone, codelocation.New(1)) - return true -} - -//You can focus the tests within a describe block using FWhen -func FWhen(text string, body func()) bool { - global.Suite.PushContainerNode("when "+text, body, types.FlagTypeFocused, codelocation.New(1)) - return true -} - -//You can mark the tests within a describe block as pending using PWhen -func PWhen(text string, body func()) bool { - global.Suite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1)) - return true -} - -//You can mark the tests within a describe block as pending using XWhen -func XWhen(text string, body func()) bool { - global.Suite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1)) - return true -} - -//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks -//within an It block. -// -//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a -//function that accepts a Done channel. When you do this, you can also provide an optional timeout. -func It(text string, body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//You can focus individual Its using FIt -func FIt(text string, body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//You can mark Its as pending using PIt -func PIt(text string, _ ...interface{}) bool { - global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) - return true -} - -//You can mark Its as pending using XIt -func XIt(text string, _ ...interface{}) bool { - global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) - return true -} - -//Specify blocks are aliases for It blocks and allow for more natural wording in situations -//which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks -//which apply to It blocks. -func Specify(text string, body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//You can focus individual Specifys using FSpecify -func FSpecify(text string, body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//You can mark Specifys as pending using PSpecify -func PSpecify(text string, is ...interface{}) bool { - global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) - return true -} - -//You can mark Specifys as pending using XSpecify -func XSpecify(text string, is ...interface{}) bool { - global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) - return true -} - -//By allows you to better document large Its. -// -//Generally you should try to keep your Its short and to the point. This is not always possible, however, -//especially in the context of integration tests that capture a particular workflow. 
-// -//By allows you to document such flows. By must be called within a runnable node (It, BeforeEach, Measure, etc...) -//By will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function. -func By(text string, callbacks ...func()) { - preamble := "\x1b[1mSTEP\x1b[0m" - if config.DefaultReporterConfig.NoColor { - preamble = "STEP" - } - fmt.Fprintln(GinkgoWriter, preamble+": "+text) - if len(callbacks) == 1 { - callbacks[0]() - } - if len(callbacks) > 1 { - panic("just one callback per By, please") - } -} - -//Measure blocks run the passed in body function repeatedly (determined by the samples argument) -//and accumulate metrics provided to the Benchmarker by the body function. -// -//The body function must have the signature: -// func(b Benchmarker) -func Measure(text string, body interface{}, samples int) bool { - deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1)) - global.Suite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples) - return true -} - -//You can focus individual Measures using FMeasure -func FMeasure(text string, body interface{}, samples int) bool { - deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1)) - global.Suite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples) - return true -} - -//You can mark Measurements as pending using PMeasure -func PMeasure(text string, _ ...interface{}) bool { - deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1)) - global.Suite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0) - return true -} - -//You can mark Measurements as pending using XMeasure -func XMeasure(text string, _ ...interface{}) bool { - deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1)) - global.Suite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0) - return true -} - -//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each -//parallel node process will call BeforeSuite. -// -//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel -// -//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level. -func BeforeSuite(body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed. -//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting. -// -//When running in parallel, each parallel node process will call AfterSuite. -// -//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel -// -//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level. 
-func AfterSuite(body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across -//nodes when running tests in parallel. For example, say you have a shared database that you can only start one instance of that -//must be used in your tests. When running in parallel, only one node should set up the database and all other nodes should wait -//until that node is done before running. -// -//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is -//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1) -//to the second function (on all the other nodes). -// -//The functions have the following signatures. The first function (which only runs on node 1) has the signature: -// -// func() []byte -// -//or, to run asynchronously: -// -// func(done Done) []byte -// -//The byte array returned by the first function is then passed to the second function, which has the signature: -// -// func(data []byte) -// -//or, to run asynchronously: -// -// func(data []byte, done Done) -// -//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes: -// -// var dbClient db.Client -// var dbRunner db.Runner -// -// var _ = SynchronizedBeforeSuite(func() []byte { -// dbRunner = db.NewRunner() -// err := dbRunner.Start() -// Ξ©(err).ShouldNot(HaveOccurred()) -// return []byte(dbRunner.URL) -// }, func(data []byte) { -// dbClient = db.NewClient() -// err := dbClient.Connect(string(data)) -// Ξ©(err).ShouldNot(HaveOccurred()) -// }) -func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool { - global.Suite.SetSynchronizedBeforeSuiteNode( - node1Body, - allNodesBody, - codelocation.New(1), - parseTimeout(timeout...), - ) - return true -} - -//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up -//external singleton resources shared across nodes when running tests in parallel. -// -//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1 -//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until -//all other nodes are finished. -// -//Both functions have the same signature: either func() or func(done Done) to run asynchronously. -// -//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database -//only after all nodes have finished: -// -// var _ = SynchronizedAfterSuite(func() { -// dbClient.Cleanup() -// }, func() { -// dbRunner.Stop() -// }) -func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool { - global.Suite.SetSynchronizedAfterSuiteNode( - allNodesBody, - node1Body, - codelocation.New(1), - parseTimeout(timeout...), - ) - return true -} - -//BeforeEach blocks are run before It blocks. 
When multiple BeforeEach blocks are defined in nested -//Describe and Context blocks the outermost BeforeEach blocks are run first. -// -//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts -//a Done channel -func BeforeEach(body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details, -//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_) -// -//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts -//a Done channel -func JustBeforeEach(body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//JustAfterEach blocks are run after It blocks but *before* all AfterEach blocks. For more details, -//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_) -// -//Like It blocks, JustAfterEach blocks can be made asynchronous by providing a body function that accepts -//a Done channel -func JustAfterEach(body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested -//Describe and Context blocks the innermost AfterEach blocks are run first. -// -//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts -//a Done channel -func AfterEach(body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -func validateBodyFunc(body interface{}, cl types.CodeLocation) { - t := reflect.TypeOf(body) - if t.Kind() != reflect.Func { - return - } - - if t.NumOut() > 0 { - return - } - - if t.NumIn() == 0 { - return - } - - if t.In(0) == reflect.TypeOf(make(Done)) { - deprecationTracker.TrackDeprecation(types.Deprecations.Async(), cl) - } -} - -func parseTimeout(timeout ...float64) time.Duration { - if len(timeout) == 0 { - return global.DefaultTimeout - } else { - return time.Duration(timeout[0] * float64(time.Second)) - } -} diff --git a/src/vendor/github.com/onsi/ginkgo/internal/global/init.go b/src/vendor/github.com/onsi/ginkgo/internal/global/init.go deleted file mode 100644 index 109f617a..00000000 --- a/src/vendor/github.com/onsi/ginkgo/internal/global/init.go +++ /dev/null @@ -1,22 +0,0 @@ -package global - -import ( - "time" - - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/internal/suite" -) - -const DefaultTimeout = time.Duration(1 * time.Second) - -var Suite *suite.Suite -var Failer *failer.Failer - -func init() { - InitializeGlobals() -} - -func InitializeGlobals() { - Failer = failer.New() - Suite = suite.New(Failer) -} diff --git a/src/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go b/src/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go deleted file mode 100644 index a0b8b62d..00000000 --- a/src/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go +++ /dev/null @@ -1,15 +0,0 @@ 
-package specrunner - -import ( - "crypto/rand" - "fmt" -) - -func randomID() string { - b := make([]byte, 8) - _, err := rand.Read(b) - if err != nil { - return "" - } - return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8]) -} diff --git a/src/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/src/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go deleted file mode 100644 index c9a0a60d..00000000 --- a/src/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go +++ /dev/null @@ -1,411 +0,0 @@ -package specrunner - -import ( - "fmt" - "os" - "os/signal" - "sync" - "syscall" - - "github.com/onsi/ginkgo/internal/spec_iterator" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/internal/leafnodes" - "github.com/onsi/ginkgo/internal/spec" - Writer "github.com/onsi/ginkgo/internal/writer" - "github.com/onsi/ginkgo/reporters" - "github.com/onsi/ginkgo/types" - - "time" -) - -type SpecRunner struct { - description string - beforeSuiteNode leafnodes.SuiteNode - iterator spec_iterator.SpecIterator - afterSuiteNode leafnodes.SuiteNode - reporters []reporters.Reporter - startTime time.Time - suiteID string - runningSpec *spec.Spec - writer Writer.WriterInterface - config config.GinkgoConfigType - interrupted bool - processedSpecs []*spec.Spec - lock *sync.Mutex -} - -func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner { - return &SpecRunner{ - description: description, - beforeSuiteNode: beforeSuiteNode, - iterator: iterator, - afterSuiteNode: afterSuiteNode, - reporters: reporters, - writer: writer, - config: config, - suiteID: randomID(), - lock: &sync.Mutex{}, - } -} - -func (runner *SpecRunner) Run() bool { - if runner.config.DryRun { - runner.performDryRun() - return true - } - - runner.reportSuiteWillBegin() - signalRegistered := make(chan struct{}) - go runner.registerForInterrupts(signalRegistered) - <-signalRegistered - - suitePassed := runner.runBeforeSuite() - - if suitePassed { - suitePassed = runner.runSpecs() - } - - runner.blockForeverIfInterrupted() - - suitePassed = runner.runAfterSuite() && suitePassed - - runner.reportSuiteDidEnd(suitePassed) - - return suitePassed -} - -func (runner *SpecRunner) performDryRun() { - runner.reportSuiteWillBegin() - - if runner.beforeSuiteNode != nil { - summary := runner.beforeSuiteNode.Summary() - summary.State = types.SpecStatePassed - runner.reportBeforeSuite(summary) - } - - for { - spec, err := runner.iterator.Next() - if err == spec_iterator.ErrClosed { - break - } - if err != nil { - fmt.Println("failed to iterate over tests:\n" + err.Error()) - break - } - - runner.processedSpecs = append(runner.processedSpecs, spec) - - summary := spec.Summary(runner.suiteID) - runner.reportSpecWillRun(summary) - if summary.State == types.SpecStateInvalid { - summary.State = types.SpecStatePassed - } - runner.reportSpecDidComplete(summary, false) - } - - if runner.afterSuiteNode != nil { - summary := runner.afterSuiteNode.Summary() - summary.State = types.SpecStatePassed - runner.reportAfterSuite(summary) - } - - runner.reportSuiteDidEnd(true) -} - -func (runner *SpecRunner) runBeforeSuite() bool { - if runner.beforeSuiteNode == nil || runner.wasInterrupted() { - return true - } - - runner.writer.Truncate() - conf := runner.config - passed := runner.beforeSuiteNode.Run(conf.ParallelNode, 
conf.ParallelTotal, conf.SyncHost) - if !passed { - runner.writer.DumpOut() - } - runner.reportBeforeSuite(runner.beforeSuiteNode.Summary()) - return passed -} - -func (runner *SpecRunner) runAfterSuite() bool { - if runner.afterSuiteNode == nil { - return true - } - - runner.writer.Truncate() - conf := runner.config - passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost) - if !passed { - runner.writer.DumpOut() - } - runner.reportAfterSuite(runner.afterSuiteNode.Summary()) - return passed -} - -func (runner *SpecRunner) runSpecs() bool { - suiteFailed := false - skipRemainingSpecs := false - for { - spec, err := runner.iterator.Next() - if err == spec_iterator.ErrClosed { - break - } - if err != nil { - fmt.Println("failed to iterate over tests:\n" + err.Error()) - suiteFailed = true - break - } - - runner.processedSpecs = append(runner.processedSpecs, spec) - - if runner.wasInterrupted() { - break - } - if skipRemainingSpecs { - spec.Skip() - } - - if !spec.Skipped() && !spec.Pending() { - if passed := runner.runSpec(spec); !passed { - suiteFailed = true - } - } else if spec.Pending() && runner.config.FailOnPending { - runner.reportSpecWillRun(spec.Summary(runner.suiteID)) - suiteFailed = true - runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) - } else { - runner.reportSpecWillRun(spec.Summary(runner.suiteID)) - runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) - } - - if spec.Failed() && runner.config.FailFast { - skipRemainingSpecs = true - } - } - - return !suiteFailed -} - -func (runner *SpecRunner) runSpec(spec *spec.Spec) (passed bool) { - maxAttempts := 1 - if runner.config.FlakeAttempts > 0 { - // uninitialized configs count as 1 - maxAttempts = runner.config.FlakeAttempts - } - - for i := 0; i < maxAttempts; i++ { - runner.reportSpecWillRun(spec.Summary(runner.suiteID)) - runner.runningSpec = spec - spec.Run(runner.writer) - runner.runningSpec = nil - runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) - if !spec.Failed() { - return true - } - } - return false -} - -func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) { - if runner.runningSpec == nil { - return nil, false - } - - return runner.runningSpec.Summary(runner.suiteID), true -} - -func (runner *SpecRunner) registerForInterrupts(signalRegistered chan struct{}) { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - close(signalRegistered) - - <-c - signal.Stop(c) - runner.markInterrupted() - go runner.registerForHardInterrupts() - runner.writer.DumpOutWithHeader(` -Received interrupt. Emitting contents of GinkgoWriter... ---------------------------------------------------------- -`) - if runner.afterSuiteNode != nil { - fmt.Fprint(os.Stderr, ` ---------------------------------------------------------- -Received interrupt. Running AfterSuite... -^C again to terminate immediately -`) - runner.runAfterSuite() - } - runner.reportSuiteDidEnd(false) - os.Exit(1) -} - -func (runner *SpecRunner) registerForHardInterrupts() { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - - <-c - fmt.Fprintln(os.Stderr, "\nReceived second interrupt. 
Shutting down.") - os.Exit(1) -} - -func (runner *SpecRunner) blockForeverIfInterrupted() { - runner.lock.Lock() - interrupted := runner.interrupted - runner.lock.Unlock() - - if interrupted { - select {} - } -} - -func (runner *SpecRunner) markInterrupted() { - runner.lock.Lock() - defer runner.lock.Unlock() - runner.interrupted = true -} - -func (runner *SpecRunner) wasInterrupted() bool { - runner.lock.Lock() - defer runner.lock.Unlock() - return runner.interrupted -} - -func (runner *SpecRunner) reportSuiteWillBegin() { - runner.startTime = time.Now() - summary := runner.suiteWillBeginSummary() - for _, reporter := range runner.reporters { - reporter.SpecSuiteWillBegin(runner.config, summary) - } -} - -func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) { - for _, reporter := range runner.reporters { - reporter.BeforeSuiteDidRun(summary) - } -} - -func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) { - for _, reporter := range runner.reporters { - reporter.AfterSuiteDidRun(summary) - } -} - -func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) { - runner.writer.Truncate() - - for _, reporter := range runner.reporters { - reporter.SpecWillRun(summary) - } -} - -func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) { - if len(summary.CapturedOutput) == 0 { - summary.CapturedOutput = string(runner.writer.Bytes()) - } - for i := len(runner.reporters) - 1; i >= 1; i-- { - runner.reporters[i].SpecDidComplete(summary) - } - - if failed { - runner.writer.DumpOut() - } - - runner.reporters[0].SpecDidComplete(summary) -} - -func (runner *SpecRunner) reportSuiteDidEnd(success bool) { - summary := runner.suiteDidEndSummary(success) - summary.RunTime = time.Since(runner.startTime) - for _, reporter := range runner.reporters { - reporter.SpecSuiteDidEnd(summary) - } -} - -func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) { - count = 0 - - for _, spec := range runner.processedSpecs { - if filter(spec) { - count++ - } - } - - return count -} - -func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary { - numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { - return !ex.Skipped() && !ex.Pending() - }) - - numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { - return ex.Pending() - }) - - numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { - return ex.Skipped() - }) - - numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { - return ex.Passed() - }) - - numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { - return ex.Flaked() - }) - - numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { - return ex.Failed() - }) - - if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun { - var known bool - numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown() - if !known { - numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration() - } - numberOfFailedSpecs = numberOfSpecsThatWillBeRun - } - - return &types.SuiteSummary{ - SuiteDescription: runner.description, - SuiteSucceeded: success, - SuiteID: runner.suiteID, - - NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(), - NumberOfTotalSpecs: len(runner.processedSpecs), - 
NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun, - NumberOfPendingSpecs: numberOfPendingSpecs, - NumberOfSkippedSpecs: numberOfSkippedSpecs, - NumberOfPassedSpecs: numberOfPassedSpecs, - NumberOfFailedSpecs: numberOfFailedSpecs, - NumberOfFlakedSpecs: numberOfFlakedSpecs, - } -} - -func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary { - numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown() - if !known { - numTotal = -1 - } - - numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown() - if !known { - numToRun = -1 - } - - return &types.SuiteSummary{ - SuiteDescription: runner.description, - SuiteID: runner.suiteID, - - NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(), - NumberOfTotalSpecs: numTotal, - NumberOfSpecsThatWillBeRun: numToRun, - NumberOfPendingSpecs: -1, - NumberOfSkippedSpecs: -1, - NumberOfPassedSpecs: -1, - NumberOfFailedSpecs: -1, - NumberOfFlakedSpecs: -1, - } -} diff --git a/src/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/src/vendor/github.com/onsi/ginkgo/internal/suite/suite.go deleted file mode 100644 index b4a83c43..00000000 --- a/src/vendor/github.com/onsi/ginkgo/internal/suite/suite.go +++ /dev/null @@ -1,227 +0,0 @@ -package suite - -import ( - "math/rand" - "net/http" - "time" - - "github.com/onsi/ginkgo/internal/spec_iterator" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/internal/containernode" - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/internal/leafnodes" - "github.com/onsi/ginkgo/internal/spec" - "github.com/onsi/ginkgo/internal/specrunner" - "github.com/onsi/ginkgo/internal/writer" - "github.com/onsi/ginkgo/reporters" - "github.com/onsi/ginkgo/types" -) - -type ginkgoTestingT interface { - Fail() -} - -type deferredContainerNode struct { - text string - body func() - flag types.FlagType - codeLocation types.CodeLocation -} - -type Suite struct { - topLevelContainer *containernode.ContainerNode - currentContainer *containernode.ContainerNode - - deferredContainerNodes []deferredContainerNode - - containerIndex int - beforeSuiteNode leafnodes.SuiteNode - afterSuiteNode leafnodes.SuiteNode - runner *specrunner.SpecRunner - failer *failer.Failer - running bool - expandTopLevelNodes bool -} - -func New(failer *failer.Failer) *Suite { - topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{}) - - return &Suite{ - topLevelContainer: topLevelContainer, - currentContainer: topLevelContainer, - failer: failer, - containerIndex: 1, - deferredContainerNodes: []deferredContainerNode{}, - } -} - -func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) { - if config.ParallelTotal < 1 { - panic("ginkgo.parallel.total must be >= 1") - } - - if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 { - panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total") - } - - suite.expandTopLevelNodes = true - for _, deferredNode := range suite.deferredContainerNodes { - suite.PushContainerNode(deferredNode.text, deferredNode.body, deferredNode.flag, deferredNode.codeLocation) - } - - r := rand.New(rand.NewSource(config.RandomSeed)) - suite.topLevelContainer.Shuffle(r) - iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config) - suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, 
writer, config) - - suite.running = true - success := suite.runner.Run() - if !success { - t.Fail() - } - return success, hasProgrammaticFocus -} - -func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) { - specsSlice := []*spec.Spec{} - suite.topLevelContainer.BackPropagateProgrammaticFocus() - for _, collatedNodes := range suite.topLevelContainer.Collate() { - specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress)) - } - - specs := spec.NewSpecs(specsSlice) - specs.RegexScansFilePath = config.RegexScansFilePath - - if config.RandomizeAllSpecs { - specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed))) - } - - specs.ApplyFocus(description, config.FocusStrings, config.SkipStrings) - - if config.SkipMeasurements { - specs.SkipMeasurements() - } - - var iterator spec_iterator.SpecIterator - - if config.ParallelTotal > 1 { - iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost) - resp, err := http.Get(config.SyncHost + "/has-counter") - if err != nil || resp.StatusCode != http.StatusOK { - iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode) - } - } else { - iterator = spec_iterator.NewSerialIterator(specs.Specs()) - } - - return iterator, specs.HasProgrammaticFocus() -} - -func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) { - if !suite.running { - return nil, false - } - return suite.runner.CurrentSpecSummary() -} - -func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.beforeSuiteNode != nil { - panic("You may only call BeforeSuite once!") - } - suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer) -} - -func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.afterSuiteNode != nil { - panic("You may only call AfterSuite once!") - } - suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer) -} - -func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.beforeSuiteNode != nil { - panic("You may only call BeforeSuite once!") - } - suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer) -} - -func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.afterSuiteNode != nil { - panic("You may only call AfterSuite once!") - } - suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer) -} - -func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) { - /* - We defer walking the container nodes (which immediately evaluates the `body` function) - until `RunSpecs` is called. We do this by storing off the deferred container nodes. Then, when - `RunSpecs` is called we actually go through and add the container nodes to the test structure. - - This allows us to defer calling all the `body` functions until _after_ the top level functions - have been walked, _after_ func init()s have been called, and _after_ `go test` has called `flag.Parse()`. 
- - This allows users to load up configuration information in the `TestX` go test hook just before `RunSpecs` - is invoked and solves issues like #693 and makes the lifecycle easier to reason about. - - */ - if !suite.expandTopLevelNodes { - suite.deferredContainerNodes = append(suite.deferredContainerNodes, deferredContainerNode{text, body, flag, codeLocation}) - return - } - - container := containernode.New(text, flag, codeLocation) - suite.currentContainer.PushContainerNode(container) - - previousContainer := suite.currentContainer - suite.currentContainer = container - suite.containerIndex++ - - body() - - suite.containerIndex-- - suite.currentContainer = previousContainer -} - -func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.running { - suite.failer.Fail("You may only call It from within a Describe, Context or When", codeLocation) - } - suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex)) -} - -func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) { - if suite.running { - suite.failer.Fail("You may only call Measure from within a Describe, Context or When", codeLocation) - } - suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex)) -} - -func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.running { - suite.failer.Fail("You may only call BeforeEach from within a Describe, Context or When", codeLocation) - } - suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) -} - -func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.running { - suite.failer.Fail("You may only call JustBeforeEach from within a Describe, Context or When", codeLocation) - } - suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) -} - -func (suite *Suite) PushJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.running { - suite.failer.Fail("You may only call JustAfterEach from within a Describe or Context", codeLocation) - } - suite.currentContainer.PushSetupNode(leafnodes.NewJustAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) -} - -func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.running { - suite.failer.Fail("You may only call AfterEach from within a Describe, Context or When", codeLocation) - } - suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) -} diff --git a/src/vendor/github.com/onsi/ginkgo/.gitignore b/src/vendor/github.com/onsi/ginkgo/v2/.gitignore similarity index 79% rename from src/vendor/github.com/onsi/ginkgo/.gitignore rename to src/vendor/github.com/onsi/ginkgo/v2/.gitignore index b9f9659d..edf0231c 100644 --- a/src/vendor/github.com/onsi/ginkgo/.gitignore +++ b/src/vendor/github.com/onsi/ginkgo/v2/.gitignore @@ -1,7 +1,7 @@ .DS_Store -TODO +TODO.md tmp/**/* *.coverprofile .vscode .idea/ -*.log +*.log \ No newline at 
end of file diff --git a/src/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/src/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md similarity index 89% rename from src/vendor/github.com/onsi/ginkgo/CHANGELOG.md rename to src/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index a26bc530..66e313d0 100644 --- a/src/vendor/github.com/onsi/ginkgo/CHANGELOG.md +++ b/src/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,49 @@ +## 2.1.4 + +### Fixes +- Numerous documentation typos +- Prepend `when` when using `When` (this behavior was in 1.x but unintentionally lost during the 2.0 rewrite) [efce903] +- improve error message when a parallel process fails to report back [a7bd1fe] +- guard against concurrent map writes in DeprecationTracker [0976569] +- Invoke reporting nodes during dry-run (fixes #956 and #935) [aae4480] +- Fix ginkgo import circle [f779385] + +## 2.1.3 + +See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2. + +### Fixes +- Calling By in a container node now emits a useful error. [ff12cee] + +## 2.1.2 + +### Fixes + +- Track location of focused specs correctly in `ginkgo unfocus` [a612ff1] +- Profiling suites with focused specs no longer generates an erroneous failure message [8fbfa02] +- Several documentation typos fixed. Big thanks to everyone who helped catch them and report/fix them! + +## 2.1.1 + +See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2. + +### Fixes +- Suites that only import the new dsl packages are now correctly identified as Ginkgo suites [ec17e17] + +## 2.1.0 + +See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2. + +2.1.0 is a minor release with a few tweaks: + +- Introduce new DSL packages to enable users to pick-and-choose which portions of the DSL to dot-import. [90868e2] More details [here](https://onsi.github.io/ginkgo/#alternatives-to-dot-importing-ginkgo). +- Add error check for invalid/nil parameters to DescribeTable [6f8577e] +- Myriad docs typos fixed (thanks everyone!) [718542a, ecb7098, 146654c, a8f9913, 6bdffde, 03dcd7e] + +## 2.0.0 + +See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) + ## 1.16.5 Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC. 
@@ -23,7 +69,7 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme ## 1.16.1 ### Fixes -- Supress --stream deprecation warning on windows (#793) +- Suppress --stream deprecation warning on windows (#793) ## 1.16.0 @@ -35,7 +81,6 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme - Add slim-sprig template functions to bootstrap/generate (#775) [9162b86] -### Fixes - Fix accidental reference to 1488 (#784) [9fb7fe4] ## 1.15.2 @@ -202,7 +247,7 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme - fix: for `go vet` to pass [69338ec] - docs: fix for contributing instructions [7004cb1] - consolidate and streamline contribution docs (#494) [d848015] -- Make generated Junit file compatable with "Maven Surefire" (#488) [e51bee6] +- Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6] - all: gofmt [000d317] - Increase eventually timeout to 30s [c73579c] - Clarify asynchronous test behaviour [294d8f4] @@ -314,7 +359,7 @@ Bug Fixes: - Fix incorrect failure message when a panic occurs during a parallel test run - Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests. - Be more consistent about handling SIGTERM as well as SIGINT -- When interupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts. +- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts. - Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!) ## 1.1.0 (8/2/2014) diff --git a/src/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md b/src/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md similarity index 50% rename from src/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md rename to src/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md index 908b95c2..15079406 100644 --- a/src/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md +++ b/src/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md @@ -6,28 +6,8 @@ Your contributions to Ginkgo are essential for its long-term maintenance and imp - Ensure adequate test coverage: - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder). - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test. -- Update the documentation. Ginko uses `godoc` comments and documentation on the `gh-pages` branch. - If relevant, please submit a docs PR to that branch alongside your code PR. +- Make sure all the tests succeed via `ginkgo -r -p` +- Vet your changes via `go vet ./...` +- Update the documentation. Ginko uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes. -Thanks for supporting Ginkgo! - -## Setup - -Fork the repo, then: - -``` -go get github.com/onsi/ginkgo -go get github.com/onsi/gomega/... -cd $GOPATH/src/github.com/onsi/ginkgo -git remote add fork git@github.com:/ginkgo.git - -ginkgo -r -p # ensure tests are green -go vet ./... # ensure linter is happy -``` - -## Making the PR - - go to a new branch `git checkout -b my-feature` - - make your changes - - run tests and linter again (see above) - - `git push fork` - - open PR πŸŽ‰ +Thanks for supporting Ginkgo! 
\ No newline at end of file diff --git a/src/vendor/github.com/onsi/ginkgo/v2/LICENSE b/src/vendor/github.com/onsi/ginkgo/v2/LICENSE new file mode 100644 index 00000000..9415ee72 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/vendor/github.com/onsi/ginkgo/v2/README.md b/src/vendor/github.com/onsi/ginkgo/v2/README.md new file mode 100644 index 00000000..58507c36 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/README.md @@ -0,0 +1,119 @@ +![Ginkgo](https://onsi.github.io/ginkgo/images/ginkgo.png) + +[![test](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/) + +--- + +# Ginkgo 2.0 is now Generally Available! + +You can learn more about 2.0 in the [Migration Guide](https://onsi.github.io/ginkgo/MIGRATING_TO_V2)! + +--- + +Ginkgo is a mature testing framework for Go designed to help you write expressive specs. Ginkgo builds on top of Go's `testing` foundation and is complemented by the [Gomega](https://github.com/onsi/gomega) matcher library. Together, Ginkgo and Gomega let you express the intent behind your specs clearly: + +```go +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + ... 
+) + +Describe("Checking books out of the library", Label("library"), func() { + var library *libraries.Library + var book *books.Book + var valjean *users.User + BeforeEach(func() { + library = libraries.NewClient() + book = &books.Book{ + Title: "Les Miserables", + Author: "Victor Hugo", + } + valjean = users.NewUser("Jean Valjean") + }) + + When("the library has the book in question", func() { + BeforeEach(func() { + Expect(library.Store(book)).To(Succeed()) + }) + + Context("and the book is available", func() { + It("lends it to the reader", func() { + Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed()) + Expect(valjean.Books()).To(ContainElement(book)) + Expect(library.UserWithBook(book)).To(Equal(valjean)) + }) + }) + + Context("but the book has already been checked out", func() { + var javert *users.User + BeforeEach(func() { + javert = users.NewUser("Javert") + Expect(javert.Checkout(library, "Les Miserables")).To(Succeed()) + }) + + It("tells the user", func() { + err := valjean.Checkout(library, "Les Miserables") + Expect(err).To(MatchError("Les Miserables is currently checked out")) + }) + + It("lets the user place a hold and get notified later", func() { + Expect(valjean.Hold(library, "Les Miserables")).To(Succeed()) + Expect(valjean.Holds()).To(ContainElement(book)) + + By("when Javert returns the book") + Expect(javert.Return(library, book)).To(Succeed()) + + By("it eventually informs Valjean") + notification := "Les Miserables is ready for pick up" + Eventually(valjean.Notifications).Should(ContainElement(notification)) + + Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed()) + Expect(valjean.Books()).To(ContainElement(book)) + Expect(valjean.Holds()).To(BeEmpty()) + }) + }) + }) + + When("the library does not have the book in question", func() { + It("tells the reader the book is unavailable", func() { + err := valjean.Checkout(library, "Les Miserables") + Expect(err).To(MatchError("Les Miserables is not in the library catalog")) + }) + }) +}) +``` + +Jump to the [docs](https://onsi.github.io/ginkgo/) to learn more. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first specs](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite). + +If you have a question, comment, bug report, feature request, etc. please open a [GitHub issue](https://github.com/onsi/ginkgo/issues/new), or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW). + +## Capabilities + +Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://olivinelabs.com/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing. + +With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions.
[`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and clean up after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs) + +At runtime, Ginkgo can run your specs in reproducibly [random order](https://onsi.github.io/ginkgo/#spec-randomization) and has sophisticated support for [spec parallelization](https://onsi.github.io/ginkgo/#spec-parallelization). In fact, running specs in parallel is as easy as + +```bash +ginkgo -p +``` + +By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. + +As your suites grow Ginkgo helps you keep your specs organized with [labels](https://onsi.github.io/ginkgo/#spec-labels) and lets you easily run [subsets of specs](https://onsi.github.io/ginkgo/#filtering-specs), either [programmatically](https://onsi.github.io/ginkgo/#focused-specs) or on the [command line](https://onsi.github.io/ginkgo/#combining-filters). And Ginkgo's reporting infrastructure generates machine-readable output in a [variety of formats](https://onsi.github.io/ginkgo/#generating-machine-readable-reports) _and_ allows you to build your own [custom reporting infrastructure](https://onsi.github.io/ginkgo/#generating-reports-programmatically). + +Ginkgo ships with `ginkgo`, a [command line tool](https://onsi.github.io/ginkgo/#ginkgo-cli-overview) with support for generating, running, filtering, and profiling Ginkgo suites. You can even have Ginkgo automatically run your specs when it detects a change with `ginkgo watch`, enabling rapid feedback loops during test-driven development. + +And that's just Ginkgo! [Gomega](https://onsi.github.io/gomega/) brings a rich, mature family of [assertions and matchers](https://onsi.github.io/gomega/#provided-matchers) to your suites. With Gomega you can easily mix [synchronous and asynchronous assertions](https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing) in your specs. You can even build your own set of expressive domain-specific matchers quickly and easily by composing Gomega's [existing building blocks](https://onsi.github.io/ginkgo/#building-custom-matchers). + +Happy Testing! + +## License + +Ginkgo is MIT-Licensed + +## Contributing + +See [CONTRIBUTING.md](CONTRIBUTING.md) diff --git a/src/vendor/github.com/onsi/ginkgo/RELEASING.md b/src/vendor/github.com/onsi/ginkgo/v2/RELEASING.md similarity index 93% rename from src/vendor/github.com/onsi/ginkgo/RELEASING.md rename to src/vendor/github.com/onsi/ginkgo/v2/RELEASING.md index db3d234c..0c80f668 100644 --- a/src/vendor/github.com/onsi/ginkgo/RELEASING.md +++ b/src/vendor/github.com/onsi/ginkgo/v2/RELEASING.md @@ -7,7 +7,7 @@ A Ginkgo release is a tagged git sha and a GitHub release. To cut a release: - New Features (minor version) - Fixes (fix version) - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact) -1. Update `VERSION` in `config/config.go` +1. Update `VERSION` in `types/version.go` +1.
Commit, push, and release: ``` git commit -m "vM.m.p" diff --git a/src/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go b/src/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go new file mode 100644 index 00000000..a61021d0 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go @@ -0,0 +1,69 @@ +package config + +// GinkgoConfigType has been deprecated and its equivalent now lives in +// the types package. You can no longer access Ginkgo configuration from the config +// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the +// current configuration +// +// GinkgoConfigType is still here so custom V1 reporters do not result in a compilation error +// It will be removed in a future minor release of Ginkgo +type GinkgoConfigType = DeprecatedGinkgoConfigType +type DeprecatedGinkgoConfigType struct { + RandomSeed int64 + RandomizeAllSpecs bool + RegexScansFilePath bool + FocusStrings []string + SkipStrings []string + SkipMeasurements bool + FailOnPending bool + FailFast bool + FlakeAttempts int + EmitSpecProgress bool + DryRun bool + DebugParallel bool + + ParallelNode int + ParallelTotal int + SyncHost string + StreamHost string +} + +// DefaultReporterConfigType has been deprecated and its equivalent now lives in +// the types package. You can no longer access Ginkgo configuration from the config +// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the +// current configuration +// +// DefaultReporterConfigType is still here so custom V1 reporters do not result in a compilation error +// It will be removed in a future minor release of Ginkgo +type DefaultReporterConfigType = DeprecatedDefaultReporterConfigType +type DeprecatedDefaultReporterConfigType struct { + NoColor bool + SlowSpecThreshold float64 + NoisyPendings bool + NoisySkippings bool + Succinct bool + Verbose bool + FullTrace bool + ReportPassed bool + ReportFile string +} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +type GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +var GinkgoConfig = GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +type DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. 
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +var DefaultReporterConfig = DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/src/vendor/github.com/onsi/ginkgo/v2/core_dsl.go new file mode 100644 index 00000000..bdb90860 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -0,0 +1,687 @@ +/* +Ginkgo is a testing framework for Go designed to help you write expressive tests. +https://github.com/onsi/ginkgo +MIT-Licensed + +The godoc documentation outlines Ginkgo's API. Since Ginkgo is a Domain-Specific Language it is important to +build a mental model for Ginkgo - the narrative documentation at https://onsi.github.io/ginkgo/ is designed to help you do that. +You should start there - even a brief skim will be helpful. At minimum you should skim through the https://onsi.github.io/ginkgo/#getting-started chapter. + +Ginkgo is best paired with the Gomega matcher library: https://github.com/onsi/gomega + +You can run Ginkgo specs with go test - however we recommend using the ginkgo cli. It enables functionality +that go test does not (especially running suites in parallel). You can learn more at https://onsi.github.io/ginkgo/#ginkgo-cli-overview +or by running 'ginkgo help'. +*/ +package ginkgo + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/internal/global" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/internal/parallel_support" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +const GINKGO_VERSION = types.VERSION + +var flagSet types.GinkgoFlagSet +var deprecationTracker = types.NewDeprecationTracker() +var suiteConfig = types.NewDefaultSuiteConfig() +var reporterConfig = types.NewDefaultReporterConfig() +var suiteDidRun = false +var outputInterceptor internal.OutputInterceptor +var client parallel_support.Client + +func init() { + var err error + flagSet, err = types.BuildTestSuiteFlagSet(&suiteConfig, &reporterConfig) + exitIfErr(err) + GinkgoWriter = internal.NewWriter(os.Stdout) +} + +func exitIfErr(err error) { + if err != nil { + if outputInterceptor != nil { + outputInterceptor.Shutdown() + } + if client != nil { + client.Close() + } + fmt.Fprintln(formatter.ColorableStdErr, err.Error()) + os.Exit(1) + } +} + +func exitIfErrors(errors []error) { + if len(errors) > 0 { + if outputInterceptor != nil { + outputInterceptor.Shutdown() + } + if client != nil { + client.Close() + } + for _, err := range errors { + fmt.Fprintln(formatter.ColorableStdErr, err.Error()) + } + os.Exit(1) + } +} + +//The interface implemented by GinkgoWriter +type GinkgoWriterInterface interface { + io.Writer + + Print(a ...interface{}) + Printf(format string, a ...interface{}) + Println(a ...interface{}) + + TeeTo(writer io.Writer) + ClearTeeWriters() +} + +/* +GinkgoWriter implements a GinkgoWriterInterface and io.Writer + +When running in verbose mode (ginkgo -v) any writes to GinkgoWriter will be immediately printed +to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen +only if the current test fails.
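+
+For example (an illustrative sketch; dialBackend and the address are placeholders, not part of Ginkgo), a spec can emit diagnostics that only surface when the spec fails:
+
+    It("talks to the backend", func() {
+        // Written to GinkgoWriter: buffered, and only shown if this spec fails (or with -v)
+        fmt.Fprintf(GinkgoWriter, "dialing %s\n", "127.0.0.1:8080")
+        Expect(dialBackend("127.0.0.1:8080")).To(Succeed()) // dialBackend is a hypothetical helper
+    })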
+ +GinkgoWriter also provides convenience Print, Printf and Println methods and allows you to tee to a custom writer via GinkgoWriter.TeeTo(writer). +Writes to GinkgoWriter are immediately sent to any registered TeeTo() writers. You can unregister all TeeTo() Writers with GinkgoWriter.ClearTeeWriters() + +You can learn more at https://onsi.github.io/ginkgo/#logging-output +*/ +var GinkgoWriter GinkgoWriterInterface + +//The interface by which Ginkgo receives *testing.T +type GinkgoTestingT interface { + Fail() +} + +/* +GinkgoConfiguration returns the configuration of the current suite. + +The first return value is the SuiteConfig which controls aspects of how the suite runs, +the second return value is the ReporterConfig which controls aspects of how Ginkgo's default +reporter emits output. + +Mutating the returned configurations has no effect. To reconfigure Ginkgo programmatically you need +to pass in your mutated copies into RunSpecs(). + +You can learn more at https://onsi.github.io/ginkgo/#overriding-ginkgos-command-line-configuration-in-the-suite +*/ +func GinkgoConfiguration() (types.SuiteConfig, types.ReporterConfig) { + return suiteConfig, reporterConfig +} + +/* +GinkgoRandomSeed returns the seed used to randomize spec execution order. It is +useful for seeding your own pseudorandom number generators to ensure +consistent executions from run to run, where your tests contain variability (for +example, when selecting random spec data). + +You can learn more at https://onsi.github.io/ginkgo/#spec-randomization +*/ +func GinkgoRandomSeed() int64 { + return suiteConfig.RandomSeed +} + +/* +GinkgoParallelProcess returns the parallel process number for the current ginkgo process +The process number is 1-indexed. You can use GinkgoParallelProcess() to shard access to shared +resources across your suites. You can learn more about patterns for sharding at https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs + +For more on how specs are parallelized in Ginkgo, see http://onsi.github.io/ginkgo/#spec-parallelization +*/ +func GinkgoParallelProcess() int { + return suiteConfig.ParallelProcess +} + +/* +PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant +when running in parallel and output to stdout/stderr is being intercepted. You generally +don't need to call this function - however there are cases when Ginkgo's output interception +mechanisms can interfere with external processes launched by the test process. + +In particular, if an external process is launched that has cmd.Stdout/cmd.Stderr set to os.Stdout/os.Stderr +then Ginkgo's output interceptor will hang. To circumvent this, set cmd.Stdout/cmd.Stderr to GinkgoWriter. +If, for some reason, you aren't able to do that, you can PauseOutputInterception() before starting the process +then ResumeOutputInterception() after starting it. + +Note that PauseOutputInterception() does not cause stdout writes to print to the console - +this simply stops intercepting and storing stdout writes to an internal buffer. +*/ +func PauseOutputInterception() { + if outputInterceptor == nil { + return + } + outputInterceptor.PauseIntercepting() +} + +//ResumeOutputInterception() - see docs for PauseOutputInterception() +func ResumeOutputInterception() { + if outputInterceptor == nil { + return + } + outputInterceptor.ResumeIntercepting() +} + +/* +RunSpecs is the entry point for the Ginkgo spec runner. + +You must call this within a Golang testing TestX(t *testing.T) function. 
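+A minimal bootstrap (an illustrative sketch, assuming dot-imports of ginkgo/v2 and gomega) looks like:
+
+    func TestMySuite(t *testing.T) {
+        RegisterFailHandler(Fail) // wire Gomega failures into Ginkgo
+        RunSpecs(t, "My Suite")
+    }
+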
+If you bootstrapped your suite with "ginkgo bootstrap" this is already +done for you. + +Ginkgo is typically configured via command-line flags. This configuration +can be overridden, however, and passed into RunSpecs as optional arguments: + + func TestMySuite(t *testing.T) { + RegisterFailHandler(gomega.Fail) + // fetch the current config + suiteConfig, reporterConfig := GinkgoConfiguration() + // adjust it + suiteConfig.SkipStrings = []string{"NEVER-RUN"} + reporterConfig.FullTrace = true + // pass it in to RunSpecs + RunSpecs(t, "My Suite", suiteConfig, reporterConfig) + } + +Note that some configuration changes can lead to undefined behavior. For example, +you should not change ParallelProcess or ParallelTotal as the Ginkgo CLI is responsible +for setting these and orchestrating parallel specs across the parallel processes. See http://onsi.github.io/ginkgo/#spec-parallelization +for more on how specs are parallelized in Ginkgo. + +You can also pass suite-level Label() decorators to RunSpecs. The passed-in labels will apply to all specs in the suite. +*/ +func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { + if suiteDidRun { + exitIfErr(types.GinkgoErrors.RerunningSuite()) + } + suiteDidRun = true + + suiteLabels := Labels{} + configErrors := []error{} + for _, arg := range args { + switch arg := arg.(type) { + case types.SuiteConfig: + suiteConfig = arg + case types.ReporterConfig: + reporterConfig = arg + case Labels: + suiteLabels = append(suiteLabels, arg...) + default: + configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg)) + } + } + exitIfErrors(configErrors) + + configErrors = types.VetConfig(flagSet, suiteConfig, reporterConfig) + if len(configErrors) > 0 { + fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{red}}Ginkgo detected configuration issues:{{/}}\n")) + for _, err := range configErrors { + fmt.Fprintf(formatter.ColorableStdErr, err.Error()) + } + os.Exit(1) + } + + var reporter reporters.Reporter + if suiteConfig.ParallelTotal == 1 { + reporter = reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut) + outputInterceptor = internal.NoopOutputInterceptor{} + client = nil + } else { + reporter = reporters.NoopReporter{} + switch strings.ToLower(suiteConfig.OutputInterceptorMode) { + case "swap": + outputInterceptor = internal.NewOSGlobalReassigningOutputInterceptor() + case "none": + outputInterceptor = internal.NoopOutputInterceptor{} + default: + outputInterceptor = internal.NewOutputInterceptor() + } + client = parallel_support.NewClient(suiteConfig.ParallelHost) + if !client.Connect() { + client = nil + exitIfErr(types.GinkgoErrors.UnreachableParallelHost(suiteConfig.ParallelHost)) + } + defer client.Close() + } + + writer := GinkgoWriter.(*internal.Writer) + if reporterConfig.Verbose && suiteConfig.ParallelTotal == 1 { + writer.SetMode(internal.WriterModeStreamAndBuffer) + } else { + writer.SetMode(internal.WriterModeBufferOnly) + } + + if reporterConfig.WillGenerateReport() { + registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig) + } + + err := global.Suite.BuildTree() + exitIfErr(err) + + suitePath, err := os.Getwd() + exitIfErr(err) + suitePath, err = filepath.Abs(suitePath) + exitIfErr(err) + + passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(suiteConfig.Timeout, client), client, suiteConfig) + outputInterceptor.Shutdown() + + 
flagSet.ValidateDeprecations(deprecationTracker) + if deprecationTracker.DidTrackDeprecations() { + fmt.Fprintln(formatter.ColorableStdErr, deprecationTracker.DeprecationsReport()) + } + + if !passed { + t.Fail() + } + + if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { + fmt.Println("PASS | FOCUSED") + os.Exit(types.GINKGO_FOCUS_EXIT_CODE) + } + return passed +} + +/* +Skip instructs Ginkgo to skip the current spec + +You can call Skip in any Setup or Subject node closure. + +For more on how to filter specs in Ginkgo see https://onsi.github.io/ginkgo/#filtering-specs +*/ +func Skip(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + cl := types.NewCodeLocationWithStackTrace(skip + 1) + global.Failer.Skip(message, cl) + panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl)) +} + +/* +Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.) + +Under the hood, Fail panics to end execution of the current spec. Ginkgo will catch this panic and proceed with +the subsequent spec. If you call Fail, or make an assertion, within a goroutine launched by your spec you must +add defer GinkgoRecover() to the goroutine to catch the panic emitted by Fail. + +You can call Fail in any Setup or Subject node closure. + +You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure +*/ +func Fail(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + + cl := types.NewCodeLocationWithStackTrace(skip + 1) + global.Failer.Fail(message, cl) + panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl)) +} + +/* +AbortSuite instructs Ginkgo to fail the current spec and skip all subsequent specs, thereby aborting the suite. + +You can call AbortSuite in any Setup or Subject node closure. + +You can learn more about how Ginkgo handles suite interruptions here: https://onsi.github.io/ginkgo/#interrupting-aborting-and-timing-out-suites +*/ +func AbortSuite(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + + cl := types.NewCodeLocationWithStackTrace(skip + 1) + global.Failer.AbortSuite(message, cl) + panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl)) +} + +/* +GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail` +Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that +calls out to Gomega + +Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent +further assertions from running. This panic must be recovered. Normally, Ginkgo recovers the panic for you, +however if a panic originates on a goroutine *launched* from one of your specs there's no +way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine. 
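+
+For example (an illustrative sketch; doSomething is a placeholder for your own code):
+
+    go func() {
+        defer GinkgoRecover() // catch the panic emitted by Fail/Gomega in this goroutine
+        Expect(doSomething()).To(Succeed())
+    }()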
+ +You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure +*/ +func GinkgoRecover() { + e := recover() + if e != nil { + global.Failer.Panic(types.NewCodeLocationWithStackTrace(1), e) + } +} + +// pushNode is used by the various test construction DSL methods to push nodes onto the suite +// it handles returned errors, emits a detailed error message to help the user learn what they may have done wrong, then exits +func pushNode(node internal.Node, errors []error) bool { + exitIfErrors(errors) + exitIfErr(global.Suite.PushNode(node)) + return true +} + +/* +Describe nodes are Container nodes that allow you to organize your specs. A Describe node's closure can contain any number of +Setup nodes (e.g. BeforeEach, AfterEach, JustBeforeEach), and Subject nodes (i.e. It). + +Context and When nodes are aliases for Describe - use whichever gives your suite a better narrative flow. It is idiomatic +to Describe the behavior of an object or function and, within that Describe, outline a number of Contexts and Whens. + +You can learn more at https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes +In addition, container nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +func Describe(text string, args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) +} + +/* +FDescribe focuses specs within the Describe block. +*/ +func FDescribe(text string, args ...interface{}) bool { + args = append(args, internal.Focus) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) +} + +/* +PDescribe marks specs within the Describe block as pending. +*/ +func PDescribe(text string, args ...interface{}) bool { + args = append(args, internal.Pending) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) +} + +/* +XDescribe marks specs within the Describe block as pending. + +XDescribe is an alias for PDescribe +*/ +var XDescribe = PDescribe + +/* Context is an alias for Describe - it generates the exact same kind of Container node */ +var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe + +/* When is an alias for Describe - it generates the exact same kind of Container node */ +func When(text string, args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) +} + +/* FWhen is the focused version of When - it generates the exact same kind of Container node */ +func FWhen(text string, args ...interface{}) bool { + args = append(args, internal.Focus) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) +} + +/* PWhen is the pending version of When - it generates the exact same kind of Container node */ +func PWhen(text string, args ...interface{}) bool { + args = append(args, internal.Pending) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) +} + +var XWhen = PWhen + +/* +It nodes are Subject nodes that contain your spec code and assertions. + +Each It node corresponds to an individual Ginkgo spec. You cannot nest any other Ginkgo nodes within an It node's closure. + +You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it +In addition, subject nodes can be decorated with a variety of decorators.
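+For example (an illustrative sketch), an It node holds assertions and may carry decorators such as Label:
+
+    It("adds small numbers", Label("fast"), func() {
+        Expect(1 + 1).To(Equal(2))
+    })
+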
You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +func It(text string, args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) +} + +/* +FIt allows you to focus an individual It. +*/ +func FIt(text string, args ...interface{}) bool { + args = append(args, internal.Focus) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) +} + +/* +PIt allows you to mark an individual It as pending. +*/ +func PIt(text string, args ...interface{}) bool { + args = append(args, internal.Pending) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) +} + +/* +XIt allows you to mark an individual It as pending. + +XIt is an alias for PIt +*/ +var XIt = PIt + +/* +Specify is an alias for It - it can allow for more natural wording in some contexts. +*/ +var Specify, FSpecify, PSpecify, XSpecify = It, FIt, PIt, XIt + +/* +By allows you to better document complex Specs. + +Generally you should try to keep your Its short and to the point. This is not always possible, however, +especially in the context of integration tests that capture complex or lengthy workflows. + +By allows you to document such flows. By may be called within a Setup or Subject node (It, BeforeEach, etc...) +and will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function. + +By will also generate and attach a ReportEntry to the spec. This will ensure that By annotations appear in Ginkgo's machine-readable reports. + +Note that By does not generate a new Ginkgo node - rather it is simply syntactic sugar around GinkgoWriter and AddReportEntry +You can learn more about By here: https://onsi.github.io/ginkgo/#documenting-complex-specs-by +*/ +func By(text string, callback ...func()) { + if !global.Suite.InRunPhase() { + exitIfErr(types.GinkgoErrors.ByNotDuringRunPhase(types.NewCodeLocation(1))) + } + value := struct { + Text string + Duration time.Duration + }{ + Text: text, + } + t := time.Now() + AddReportEntry("By Step", ReportEntryVisibilityNever, Offset(1), &value, t) + formatter := formatter.NewWithNoColorBool(reporterConfig.NoColor) + GinkgoWriter.Println(formatter.F("{{bold}}STEP:{{/}} %s {{gray}}%s{{/}}", text, t.Format(types.GINKGO_TIME_FORMAT))) + if len(callback) == 1 { + callback[0]() + value.Duration = time.Since(t) + } + if len(callback) > 1 { + panic("just one callback per By, please") + } +} + +/* +BeforeSuite nodes are suite-level Setup nodes that run just once before any specs are run. +When running in parallel, each parallel process will call BeforeSuite. + +You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level. + +You cannot nest any other Ginkgo nodes within a BeforeSuite node's closure. +You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite +*/ +func BeforeSuite(body func()) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", body)) +} + +/* +AfterSuite nodes are suite-level Setup nodes run after all specs have finished - regardless of whether specs have passed or failed. +AfterSuite node closures always run, even if Ginkgo receives an interrupt signal (^C), in order to ensure cleanup occurs. + +When running in parallel, each parallel process will call AfterSuite. + +You may only register *one* AfterSuite handler per test suite.
You typically do so in your bootstrap file at the top level. + +You cannot nest any other Ginkgo nodes within an AfterSuite node's closure. +You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite +*/ +func AfterSuite(body func()) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", body)) +} + +/* +SynchronizedBeforeSuite nodes allow you to perform some of the suite setup just once - on parallel process #1 - and then pass information +from that setup to the rest of the suite setup on all processes. This is useful for performing expensive or singleton setup once, then passing +information from that setup to all parallel processes. + +SynchronizedBeforeSuite accomplishes this by taking *two* function arguments and passing data between them. +The first function is only run on parallel process #1. The second is run on all processes, but *only* after the first function completes successfully. The functions have the following signatures: + +The first function (which only runs on process #1) has the signature: + + func() []byte + +The byte array returned by the first function is then passed to the second function, which has the signature: + + func(data []byte) + +You cannot nest any other Ginkgo nodes within a SynchronizedBeforeSuite node's closure. +You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite +*/ +func SynchronizedBeforeSuite(process1Body func() []byte, allProcessBody func([]byte)) bool { + return pushNode(internal.NewSynchronizedBeforeSuiteNode(process1Body, allProcessBody, types.NewCodeLocation(1))) +} + +/* +SynchronizedAfterSuite nodes complement the SynchronizedBeforeSuite nodes in solving the problem of splitting cleanup into a piece that runs on all processes +and a piece that must only run once - on process #1. + +SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all processes. The second runs only on parallel process #1 +and *only* after all other processes have finished and exited. This ensures that process #1, and any resources it is managing, remain alive until +all other processes are finished. + +Note that you can also use DeferCleanup() in SynchronizedBeforeSuite to accomplish similar results. + +You cannot nest any other Ginkgo nodes within a SynchronizedAfterSuite node's closure. +You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite +*/ +func SynchronizedAfterSuite(allProcessBody func(), process1Body func()) bool { + return pushNode(internal.NewSynchronizedAfterSuiteNode(allProcessBody, process1Body, types.NewCodeLocation(1))) +} + +/* +BeforeEach nodes are Setup nodes whose closures run before It node closures. When multiple BeforeEach nodes +are defined in nested Container nodes the outermost BeforeEach node closures are run first. + +You cannot nest any other Ginkgo nodes within a BeforeEach node's closure. +You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach +*/ +func BeforeEach(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeEach, "", args...)) +} + +/* +JustBeforeEach nodes are similar to BeforeEach nodes, however they are guaranteed to run *after* all BeforeEach node closures - just before the It node closure.
+This can allow you to separate configuration from creation of resources for a spec. + +You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure. +You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach +*/ +func JustBeforeEach(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustBeforeEach, "", args...)) +} + +/* +AfterEach nodes are Setup nodes whose closures run after It node closures. When multiple AfterEach nodes +are defined in nested Container nodes the innermost AfterEach node closures are run first. + +Note that you can also use DeferCleanup() in other Setup or Subject nodes to accomplish similar results. + +You cannot nest any other Ginkgo nodes within an AfterEach node's closure. +You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup +*/ +func AfterEach(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterEach, "", args...)) +} + +/* +JustAfterEach nodes are similar to AfterEach nodes, however they are guaranteed to run *before* all AfterEach node closures - just after the It node closure. This can allow you to separate diagnostics collection from teardown for a spec. + +You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure. +You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach +*/ +func JustAfterEach(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustAfterEach, "", args...)) +} + +/* +BeforeAll nodes are Setup nodes that can occur inside Ordered containers. They run just once before any specs in the Ordered container run. + +Multiple BeforeAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container. + +You cannot nest any other Ginkgo nodes within a BeforeAll node's closure. +You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers +And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall +*/ +func BeforeAll(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeAll, "", args...)) +} + +/* +AfterAll nodes are Setup nodes that can occur inside Ordered containers. They run just once after all specs in the Ordered container have run. + +Multiple AfterAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container. + +Note that you can also use DeferCleanup() in a BeforeAll node to accomplish similar behavior. + +You cannot nest any other Ginkgo nodes within an AfterAll node's closure. +You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers +And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall +*/ +func AfterAll(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterAll, "", args...)) +} + +/* +DeferCleanup can be called within any Setup or Subject node to register a cleanup callback that Ginkgo will call at the appropriate time to clean up after the spec. + +DeferCleanup can be passed: +1. A function that takes no arguments and returns no values. +2.
A function that returns an error (in which case it will assert that the returned error was nil, or it will fail the spec). +3. A function that takes arguments (and optionally returns an error) followed by a list of arguments to pass to the function. For example: + + BeforeEach(func() { + DeferCleanup(os.Setenv, "FOO", os.Getenv("FOO")) + os.Setenv("FOO", "BAR") + }) + +will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.Getenv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec. + +When DeferCleanup is called in BeforeEach, JustBeforeEach, It, AfterEach, or JustAfterEach the registered callback will be invoked when the spec completes (i.e. it will behave like an AfterEach node) +When DeferCleanup is called in BeforeAll or AfterAll the registered callback will be invoked when the ordered container completes (i.e. it will behave like an AfterAll node) +When DeferCleanup is called in BeforeSuite, SynchronizedBeforeSuite, AfterSuite, or SynchronizedAfterSuite the registered callback will be invoked when the suite completes (i.e. it will behave like an AfterSuite node) + +Note that DeferCleanup does not represent a node but rather dynamically generates the appropriate type of cleanup node based on the context in which it is called. As such you must call DeferCleanup within a Setup or Subject node, and not within a Container node. +You can learn more about DeferCleanup here: https://onsi.github.io/ginkgo/#cleaning-up-our-cleanup-code-defercleanup +*/ +func DeferCleanup(args ...interface{}) { + fail := func(message string, cl types.CodeLocation) { + global.Failer.Fail(message, cl) + } + pushNode(internal.NewCleanupNode(fail, args...)) +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/src/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go new file mode 100644 index 00000000..f23e526f --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go @@ -0,0 +1,82 @@ +package ginkgo + +import ( + "github.com/onsi/ginkgo/v2/internal" +) + +/* +Offset(uint) is a decorator that allows you to change the stack-frame offset used when computing the line number of the node in question. + +You can learn more here: https://onsi.github.io/ginkgo/#the-offset-decorator +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +type Offset = internal.Offset + +/* +FlakeAttempts(uint N) is a decorator that allows you to mark individual specs or spec containers as flaky. Ginkgo will run them up to `N` times until they pass. + +You can learn more here: https://onsi.github.io/ginkgo/#repeating-spec-runs-and-managing-flaky-specs +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +type FlakeAttempts = internal.FlakeAttempts + +/* +Focus is a decorator that allows you to mark a spec or container as focused. Identical to FIt and FDescribe. + +You can learn more here: https://onsi.github.io/ginkgo/#filtering-specs +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +const Focus = internal.Focus + +/* +Pending is a decorator that allows you to mark a spec or container as pending. Identical to PIt and PDescribe.
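+
+For example (an illustrative sketch), the decorator form can be used instead of the P-prefixed nodes:
+
+    Describe("a work-in-progress feature", Pending, func() {
+        It("will be implemented later", func() {})
+    })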
+ +You can learn more here: https://onsi.github.io/ginkgo/#filtering-specs +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +const Pending = internal.Pending + +/* +Serial is a decorator that allows you to mark a spec or container as serial. These specs will never run in parallel with other specs. +Tests in ordered containers cannot be marked as serial - mark the ordered container instead. + +You can learn more here: https://onsi.github.io/ginkgo/#serial-specs +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +const Serial = internal.Serial + +/* +Ordered is a decorator that allows you to mark a container as ordered. Tests in the container will always run in the order they appear. +They will never be randomized and they will never run in parallel with one another, though they may run in parallel with other specs. + +You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +const Ordered = internal.Ordered + +/* +OncePerOrdered is a decorator that allows you to mark outer BeforeEach, AfterEach, JustBeforeEach, and JustAfterEach setup nodes to run once +per ordered context. Normally these setup nodes run around each individual spec, with OncePerOrdered they will run once around the set of specs in an ordered container. +The behavior for non-Ordered containers/specs is unchanged. + +You can learn more here: https://onsi.github.io/ginkgo/#setup-around-ordered-containers-the-onceperordered-decorator +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +const OncePerOrdered = internal.OncePerOrdered + +/* +Label decorates specs with Labels. Multiple labels can be passed to Label and these can be arbitrary strings but must not include the following characters: "&|!,()/". +Labels can be applied to container and subject nodes, but not setup nodes. You can provide multiple Labels to a given node and a spec's labels are the union of all labels in its node hierarchy. + +You can learn more here: https://onsi.github.io/ginkgo/#spec-labels +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +func Label(labels ...string) Labels { + return Labels(labels) +} + +/* +Labels are the type for spec Label decorators. Use Label(...) to construct Labels. +You can learn more here: https://onsi.github.io/ginkgo/#spec-labels +*/ +type Labels = internal.Labels diff --git a/src/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go b/src/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go new file mode 100644 index 00000000..d20e5a8c --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go @@ -0,0 +1,135 @@ +package ginkgo + +import ( + "time" + + "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/internal/global" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +/* +Deprecated: Done Channel for asynchronous testing + +The Done channel pattern is no longer supported in Ginkgo 2.0. +See here for better patterns for asynchronous testing: https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing + +For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-async-testing +*/ +type Done = internal.Done + +/* +Deprecated: Custom Ginkgo test reporters are deprecated in Ginkgo 2.0.
+ +Use Ginkgo's reporting nodes and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure +For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters +*/ +type Reporter = reporters.DeprecatedReporter + +/* +Deprecated: Custom Reporters have been removed in Ginkgo 2.0. RunSpecsWithDefaultAndCustomReporters will simply call RunSpecs() + +Use Ginkgo's reporting nodes and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure +For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters +*/ +func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, _ []Reporter) bool { + deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter()) + return RunSpecs(t, description) +} + +/* +Deprecated: Custom Reporters have been removed in Ginkgo 2.0. RunSpecsWithCustomReporters will simply call RunSpecs() + +Use Ginkgo's reporting nodes and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure +For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters +*/ +func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, _ []Reporter) bool { + deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter()) + return RunSpecs(t, description) +} + +/* +Deprecated: GinkgoTestDescription has been replaced with SpecReport. + +Use CurrentSpecReport() instead. +You can learn more here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec +The SpecReport type is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport +*/ +type DeprecatedGinkgoTestDescription struct { + FullTestText string + ComponentTexts []string + TestText string + + FileName string + LineNumber int + + Failed bool + Duration time.Duration +} +type GinkgoTestDescription = DeprecatedGinkgoTestDescription + +/* +Deprecated: CurrentGinkgoTestDescription has been replaced with CurrentSpecReport. + +Use CurrentSpecReport() instead. +You can learn more here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec +The SpecReport type is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport +*/ +func CurrentGinkgoTestDescription() DeprecatedGinkgoTestDescription { + deprecationTracker.TrackDeprecation( + types.Deprecations.CurrentGinkgoTestDescription(), + types.NewCodeLocation(1), + ) + report := global.Suite.CurrentSpecReport() + if report.State == types.SpecStateInvalid { + return GinkgoTestDescription{} + } + componentTexts := []string{} + componentTexts = append(componentTexts, report.ContainerHierarchyTexts...)
+ componentTexts = append(componentTexts, report.LeafNodeText) + + return DeprecatedGinkgoTestDescription{ + ComponentTexts: componentTexts, + FullTestText: report.FullText(), + TestText: report.LeafNodeText, + FileName: report.LeafNodeLocation.FileName, + LineNumber: report.LeafNodeLocation.LineNumber, + Failed: report.State.Is(types.SpecStateFailureStates), + Duration: report.RunTime, + } +} + +/* +Deprecated: GinkgoParallelNode() has been renamed to GinkgoParallelProcess() +*/ +func GinkgoParallelNode() int { + deprecationTracker.TrackDeprecation( + types.Deprecations.ParallelNode(), + types.NewCodeLocation(1), + ) + return GinkgoParallelProcess() +} + +/* +Deprecated: Benchmarker has been removed from Ginkgo 2.0 + +Use Gomega's gmeasure package instead. +You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code +*/ +type Benchmarker interface { + Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) + RecordValue(name string, value float64, info ...interface{}) + RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) +} + +/* +Deprecated: Measure() has been removed from Ginkgo 2.0 + +Use Gomega's gmeasure package instead. +You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code +*/ +func Measure(_ ...interface{}) bool { + deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), types.NewCodeLocation(1)) + return true +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go b/src/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go new file mode 100644 index 00000000..778bfd7c --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go @@ -0,0 +1,41 @@ +// +build !windows + +/* +These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com + + * go-colorable: + * go-isatty: + +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+*/ + +package formatter + +import ( + "io" + "os" +) + +func newColorable(file *os.File) io.Writer { + return file +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go b/src/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go new file mode 100644 index 00000000..dd1d143c --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go @@ -0,0 +1,809 @@ +/* +These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com + + * go-colorable: + * go-isatty: + +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package formatter + +import ( + "bytes" + "fmt" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +func isTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type writer struct { + out io.Writer + handle syscall.Handle + lastbuf bytes.Buffer + oldattr word +} + +func newColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := 
syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &writer{out: file, handle: handle, oldattr: csbi.attributes} + } else { + return file + } +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 
201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +func (w *writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + er := bytes.NewBuffer(data) +loop: + for { + r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + if r1 == 0 { + break loop + } + + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + var m rune + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + break + } + buf.Write([]byte(string(c))) + } + + var csbi consoleScreenBufferInfo + switch m { + case 'A': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n, err = strconv.Atoi(buf.String()); err == nil { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + } + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += 
short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H': + token := strings.Split(buf.String(), ";") + if len(token) != 2 { + continue + } + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2) + csbi.cursorPosition.x = short(n1) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i += 1 { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 22 == n || n == 25 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr = (attr & backgroundMask) + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 
0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr = (attr & foregroundMask) + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + } + } + } + } + return len(data) - w.lastbuf.Len(), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + consoleColor{0x000000, false, false, false, false}, + consoleColor{0x000080, false, false, true, false}, + consoleColor{0x008000, false, true, false, false}, + consoleColor{0x008080, false, true, true, false}, + consoleColor{0x800000, true, false, false, false}, + consoleColor{0x800080, true, false, true, false}, + consoleColor{0x808000, true, true, false, false}, + consoleColor{0xc0c0c0, true, true, true, false}, + consoleColor{0x808080, false, false, false, true}, + consoleColor{0x0000ff, false, false, true, true}, + consoleColor{0x00ff00, false, true, false, true}, + consoleColor{0x00ffff, false, true, true, true}, + consoleColor{0xff0000, true, false, false, true}, + consoleColor{0xff00ff, true, false, true, true}, + consoleColor{0xffff00, true, true, false, true}, + consoleColor{0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, 
b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/src/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go new file mode 100644 index 00000000..43b16211 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -0,0 +1,195 @@ +package formatter + +import ( + "fmt" + "os" + "regexp" + "strings" +) + +// ColorableStdOut and ColorableStdErr enable color output support on Windows +var ColorableStdOut = newColorable(os.Stdout) +var ColorableStdErr = newColorable(os.Stderr) + +const COLS = 80 + +type ColorMode uint8 + +const ( + ColorModeNone ColorMode = iota + ColorModeTerminal + ColorModePassthrough +) + +var SingletonFormatter = New(ColorModeTerminal) + +func F(format string, args ...interface{}) string { + return SingletonFormatter.F(format, args...) +} + +func Fi(indentation uint, format string, args ...interface{}) string { + return SingletonFormatter.Fi(indentation, format, args...) +} + +func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { + return SingletonFormatter.Fiw(indentation, maxWidth, format, args...) 
+} + +type Formatter struct { + ColorMode ColorMode + colors map[string]string + styleRe *regexp.Regexp + preserveColorStylingTags bool +} + +func NewWithNoColorBool(noColor bool) Formatter { + if noColor { + return New(ColorModeNone) + } + return New(ColorModeTerminal) +} + +func New(colorMode ColorMode) Formatter { + f := Formatter{ + ColorMode: colorMode, + colors: map[string]string{ + "/": "\x1b[0m", + "bold": "\x1b[1m", + "underline": "\x1b[4m", + + "red": "\x1b[38;5;9m", + "orange": "\x1b[38;5;214m", + "coral": "\x1b[38;5;204m", + "magenta": "\x1b[38;5;13m", + "green": "\x1b[38;5;10m", + "dark-green": "\x1b[38;5;28m", + "yellow": "\x1b[38;5;11m", + "light-yellow": "\x1b[38;5;228m", + "cyan": "\x1b[38;5;14m", + "gray": "\x1b[38;5;243m", + "light-gray": "\x1b[38;5;246m", + "blue": "\x1b[38;5;12m", + }, + } + colors := []string{} + for color := range f.colors { + colors = append(colors, color) + } + f.styleRe = regexp.MustCompile("{{(" + strings.Join(colors, "|") + ")}}") + return f +} + +func (f Formatter) F(format string, args ...interface{}) string { + return f.Fi(0, format, args...) +} + +func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string { + return f.Fiw(indentation, 0, format, args...) +} + +func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { + out := fmt.Sprintf(f.style(format), args...) + + if indentation == 0 && maxWidth == 0 { + return out + } + + lines := strings.Split(out, "\n") + + if maxWidth != 0 { + outLines := []string{} + + maxWidth = maxWidth - indentation*2 + for _, line := range lines { + if f.length(line) <= maxWidth { + outLines = append(outLines, line) + continue + } + words := strings.Split(line, " ") + outWords := []string{words[0]} + length := uint(f.length(words[0])) + for _, word := range words[1:] { + wordLength := f.length(word) + if length+wordLength+1 <= maxWidth { + length += wordLength + 1 + outWords = append(outWords, word) + continue + } + outLines = append(outLines, strings.Join(outWords, " ")) + outWords = []string{word} + length = wordLength + } + if len(outWords) > 0 { + outLines = append(outLines, strings.Join(outWords, " ")) + } + } + + lines = outLines + } + + if indentation == 0 { + return strings.Join(lines, "\n") + } + + padding := strings.Repeat(" ", int(indentation)) + for i := range lines { + if lines[i] != "" { + lines[i] = padding + lines[i] + } + } + + return strings.Join(lines, "\n") +} + +func (f Formatter) length(styled string) uint { + n := uint(0) + inStyle := false + for _, b := range styled { + if inStyle { + if b == 'm' { + inStyle = false + } + continue + } + if b == '\x1b' { + inStyle = true + continue + } + n += 1 + } + return n +} + +func (f Formatter) CycleJoin(elements []string, joiner string, cycle []string) string { + if len(elements) == 0 { + return "" + } + n := len(cycle) + out := "" + for i, text := range elements { + out += cycle[i%n] + text + if i < len(elements)-1 { + out += joiner + } + } + out += "{{/}}" + return f.style(out) +} + +func (f Formatter) style(s string) string { + switch f.ColorMode { + case ColorModeNone: + return f.styleRe.ReplaceAllString(s, "") + case ColorModePassthrough: + return s + case ColorModeTerminal: + return f.styleRe.ReplaceAllStringFunc(s, func(match string) string { + if out, ok := f.colors[strings.Trim(match, "{}")]; ok { + return out + } + return match + }) + } + + return "" +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go 
b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go new file mode 100644 index 00000000..f7d2eaf0 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -0,0 +1,61 @@ +package build + +import ( + "fmt" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildBuildCommand() command.Command { + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildBuildCommandFlagSet(&cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "build", + Flags: flags, + Usage: "ginkgo build ", + ShortDoc: "Build the passed in (or the package in the current directory if left blank).", + DocLink: "precompiling-suites", + Command: func(args []string, _ []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + buildSpecs(args, cliConfig, goFlagsConfig) + }, + } +} + +func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) { + suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers()) + opc.StartCompiling(suites, goFlagsConfig) + + for { + suiteIdx, suite := opc.Next() + if suiteIdx >= len(suites) { + break + } + suites[suiteIdx] = suite + if suite.State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suite.CompilationError.Error()) + } else { + fmt.Printf("Compiled %s.test\n", suite.PackageName) + } + } + + if suites.CountWithState(internal.TestSuiteStateFailedToCompile) > 0 { + command.AbortWith("Failed to compile all tests") + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go new file mode 100644 index 00000000..2efd2860 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go @@ -0,0 +1,61 @@ +package command + +import "fmt" + +type AbortDetails struct { + ExitCode int + Error error + EmitUsage bool +} + +func Abort(details AbortDetails) { + panic(details) +} + +func AbortGracefullyWith(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 0, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWith(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWithUsage(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: true, + }) +} + +func AbortIfError(preamble string, err error) { + if err != nil { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, err.Error()), + EmitUsage: false, + }) + } +} + +func AbortIfErrors(preamble string, errors []error) { + if len(errors) > 0 { + out := "" + for _, err := range errors { + out += err.Error() + } + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, out), + EmitUsage: false, + }) + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go new file mode 100644 index 
00000000..12e0e565 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go @@ -0,0 +1,50 @@ +package command + +import ( + "fmt" + "io" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Command struct { + Name string + Flags types.GinkgoFlagSet + Usage string + ShortDoc string + Documentation string + DocLink string + Command func(args []string, additionalArgs []string) +} + +func (c Command) Run(args []string, additionalArgs []string) { + args, err := c.Flags.Parse(args) + if err != nil { + AbortWithUsage(err.Error()) + } + + c.Command(args, additionalArgs) +} + +func (c Command) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F("{{bold}}"+c.Usage+"{{/}}")) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(c.Usage)))) + if c.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.ShortDoc)) + fmt.Fprintln(writer, "") + } + if c.Documentation != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.Documentation)) + fmt.Fprintln(writer, "") + } + if c.DocLink != "" { + fmt.Fprintln(writer, formatter.Fi(0, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}", c.DocLink)) + fmt.Fprintln(writer, "") + } + flagUsage := c.Flags.Usage() + if flagUsage != "" { + fmt.Fprintf(writer, formatter.F(flagUsage)) + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go new file mode 100644 index 00000000..88dd8d6b --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go @@ -0,0 +1,182 @@ +package command + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Program struct { + Name string + Heading string + Commands []Command + DefaultCommand Command + DeprecatedCommands []DeprecatedCommand + + //For testing - leave as nil in production + OutWriter io.Writer + ErrWriter io.Writer + Exiter func(code int) +} + +type DeprecatedCommand struct { + Name string + Deprecation types.Deprecation +} + +func (p Program) RunAndExit(osArgs []string) { + var command Command + deprecationTracker := types.NewDeprecationTracker() + if p.Exiter == nil { + p.Exiter = os.Exit + } + if p.OutWriter == nil { + p.OutWriter = formatter.ColorableStdOut + } + if p.ErrWriter == nil { + p.ErrWriter = formatter.ColorableStdErr + } + + defer func() { + exitCode := 0 + + if r := recover(); r != nil { + details, ok := r.(AbortDetails) + if !ok { + panic(r) + } + + if details.Error != nil { + fmt.Fprintln(p.ErrWriter, formatter.F("{{red}}{{bold}}%s %s{{/}} {{red}}failed{{/}}", p.Name, command.Name)) + fmt.Fprintln(p.ErrWriter, formatter.Fi(1, details.Error.Error())) + } + if details.EmitUsage { + if details.Error != nil { + fmt.Fprintln(p.ErrWriter, "") + } + command.EmitUsage(p.ErrWriter) + } + exitCode = details.ExitCode + } + + command.Flags.ValidateDeprecations(deprecationTracker) + if deprecationTracker.DidTrackDeprecations() { + fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport()) + } + p.Exiter(exitCode) + return + }() + + args, additionalArgs := []string{}, []string{} + + foundDelimiter := false + for _, arg := range osArgs[1:] { + if !foundDelimiter { + if arg == "--" { + foundDelimiter = true + continue + } + } + + if foundDelimiter { + additionalArgs = append(additionalArgs, arg) + } else { + args = append(args, arg) + } + } + + command = 
p.DefaultCommand + if len(args) > 0 { + p.handleHelpRequestsAndExit(p.OutWriter, args) + if command.Name == args[0] { + args = args[1:] + } else { + for _, deprecatedCommand := range p.DeprecatedCommands { + if deprecatedCommand.Name == args[0] { + deprecationTracker.TrackDeprecation(deprecatedCommand.Deprecation) + return + } + } + for _, tryCommand := range p.Commands { + if tryCommand.Name == args[0] { + command, args = tryCommand, args[1:] + break + } + } + } + } + + command.Run(args, additionalArgs) +} + +func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) { + if len(args) == 0 { + return + } + + matchesHelpFlag := func(args ...string) bool { + for _, arg := range args { + if arg == "--help" || arg == "-help" || arg == "-h" || arg == "--h" { + return true + } + } + return false + } + if len(args) == 1 { + if args[0] == "help" || matchesHelpFlag(args[0]) { + p.EmitUsage(writer) + Abort(AbortDetails{}) + } + } else { + var name string + if args[0] == "help" || matchesHelpFlag(args[0]) { + name = args[1] + } else if matchesHelpFlag(args[1:]...) { + name = args[0] + } else { + return + } + + if p.DefaultCommand.Name == name || p.Name == name { + p.DefaultCommand.EmitUsage(writer) + Abort(AbortDetails{}) + } + for _, command := range p.Commands { + if command.Name == name { + command.EmitUsage(writer) + Abort(AbortDetails{}) + } + } + + fmt.Fprintln(writer, formatter.F("{{red}}Unknown Command: {{bold}}%s{{/}}", name)) + fmt.Fprintln(writer, "") + p.EmitUsage(writer) + Abort(AbortDetails{ExitCode: 1}) + } + return +} + +func (p Program) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F(p.Heading)) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(p.Heading)))) + fmt.Fprintln(writer, formatter.F("For usage information for a command, run {{bold}}%s help COMMAND{{/}}.", p.Name)) + fmt.Fprintln(writer, formatter.F("For usage information for the default command, run {{bold}}%s help %s{{/}} or {{bold}}%s help %s{{/}}.", p.Name, p.Name, p.Name, p.DefaultCommand.Name)) + fmt.Fprintln(writer, "") + fmt.Fprintln(writer, formatter.F("The following commands are available:")) + + fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} or %s {{bold}}%s{{/}} - {{gray}}%s{{/}}", p.Name, p.Name, p.DefaultCommand.Name, p.DefaultCommand.Usage)) + if p.DefaultCommand.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, p.DefaultCommand.ShortDoc)) + } + + for _, command := range p.Commands { + fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} - {{gray}}%s{{/}}", command.Name, command.Usage)) + if command.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, command.ShortDoc)) + } + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go new file mode 100644 index 00000000..a367a1fc --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go @@ -0,0 +1,48 @@ +package generators + +var bootstrapText = `package {{.Package}} + +import ( + "testing" + + {{.GinkgoImport}} + {{.GomegaImport}} +) + +func Test{{.FormattedName}}(t *testing.T) { + {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} +` + +var agoutiBootstrapText = `package {{.Package}} + +import ( + "testing" + + {{.GinkgoImport}} + {{.GomegaImport}} + "github.com/sclevine/agouti" +) + +func Test{{.FormattedName}}(t *testing.T) { + 
{{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} + +var agoutiDriver *agouti.WebDriver + +var _ = {{.GinkgoPackage}}BeforeSuite(func() { + // Choose a WebDriver: + + agoutiDriver = agouti.PhantomJS() + // agoutiDriver = agouti.Selenium() + // agoutiDriver = agouti.ChromeDriver() + + {{.GomegaPackage}}Expect(agoutiDriver.Start()).To({{.GomegaPackage}}Succeed()) +}) + +var _ = {{.GinkgoPackage}}AfterSuite(func() { + {{.GomegaPackage}}Expect(agoutiDriver.Stop()).To({{.GomegaPackage}}Succeed()) +}) +` diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go new file mode 100644 index 00000000..0273abe9 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -0,0 +1,113 @@ +package generators + +import ( + "bytes" + "fmt" + "os" + "text/template" + + sprig "github.com/go-task/slim-sprig" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildBootstrapCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, bootstrap will generate a bootstrap file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, bootstrap will generate a bootstrap test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, bootstrap will generate a bootstrap test file that uses the regular package name (i.e. `package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the bootstrap template"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "bootstrap", + Usage: "ginkgo bootstrap", + ShortDoc: "Bootstrap a test suite for the current package", + Documentation: `Tests written in Ginkgo and Gomega require a small amount of boilerplate to hook into Go's testing infrastructure. + +{{bold}}ginkgo bootstrap{{/}} generates this boilerplate for you in a file named X_suite_test.go where X is the name of the package under test.`, + DocLink: "generators", + Flags: flags, + Command: func(_ []string, _ []string) { + generateBootstrap(conf) + }, + } +} + +type bootstrapData struct { + Package string + FormattedName string + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string +} + +func generateBootstrap(conf GeneratorsConfig) { + packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName() + + data := bootstrapData{ + Package: determinePackageName(packageName, conf.Internal), + FormattedName: formattedName, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. 
"github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom bootstrap file:", err) + templateText = string(tpl) + } else if conf.Agouti { + templateText = agoutiBootstrapText + } else { + templateText = bootstrapText + } + + bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Parse(templateText) + command.AbortIfError("Failed to parse bootstrap template:", err) + + buf := &bytes.Buffer{} + bootstrapTemplate.Execute(buf, data) + + buf.WriteTo(f) + + internal.GoFmt(targetFile) +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go new file mode 100644 index 00000000..33a1b679 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -0,0 +1,239 @@ +package generators + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "text/template" + + sprig "github.com/go-task/slim-sprig" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildGenerateCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, generate will create a test file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, generate will create a test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, generate will create a test file that uses the regular package name (i.e. `package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the test file template"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "generate", + Usage: "ginkgo generate ", + ShortDoc: "Generate a test file named _test.go", + Documentation: `If the optional argument is omitted, a file named after the package in the current directory will be created. + +You can pass multiple to generate multiple files simultaneously. The resulting files are named _test.go. 
+ +You can also pass a of the form "file.go" and generate will emit "file_test.go".`, + DocLink: "generators", + Flags: flags, + Command: func(args []string, _ []string) { + generateTestFiles(conf, args) + }, + } +} + +type specData struct { + Package string + Subject string + PackageImportPath string + ImportPackage bool + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string +} + +func generateTestFiles(conf GeneratorsConfig, args []string) { + subjects := args + if len(subjects) == 0 { + subjects = []string{""} + } + for _, subject := range subjects { + generateTestFileForSubject(subject, conf) + } +} + +func generateTestFileForSubject(subject string, conf GeneratorsConfig) { + packageName, specFilePrefix, formattedName := getPackageAndFormattedName() + if subject != "" { + specFilePrefix = formatSubject(subject) + formattedName = prettifyName(specFilePrefix) + } + + if conf.Internal { + specFilePrefix = specFilePrefix + "_internal" + } + + data := specData{ + Package: determinePackageName(packageName, conf.Internal), + Subject: formattedName, + PackageImportPath: getPackageImportPath(), + ImportPackage: !conf.Internal, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. "github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_test.go", specFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create test file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom template file:", err) + templateText = string(tpl) + } else if conf.Agouti { + templateText = agoutiSpecText + } else { + templateText = specText + } + + specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Parse(templateText) + command.AbortIfError("Failed to read parse test template:", err) + + specTemplate.Execute(f, data) + internal.GoFmt(targetFile) +} + +func formatSubject(name string) string { + name = strings.Replace(name, "-", "_", -1) + name = strings.Replace(name, " ", "_", -1) + name = strings.Split(name, ".go")[0] + name = strings.Split(name, "_test")[0] + return name +} + +// moduleName returns module name from go.mod from given module root directory +func moduleName(modRoot string) string { + modFile, err := os.Open(filepath.Join(modRoot, "go.mod")) + if err != nil { + return "" + } + + mod := make([]byte, 128) + _, err = modFile.Read(mod) + if err != nil { + return "" + } + + slashSlash := []byte("//") + moduleStr := []byte("module") + + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + 
return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + + return "" // missing module path +} + +func findModuleRoot(dir string) (root string) { + dir = filepath.Clean(dir) + + // Look for enclosing go.mod. + for { + if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() { + return dir + } + d := filepath.Dir(dir) + if d == dir { + break + } + dir = d + } + return "" +} + +func getPackageImportPath() string { + workingDir, err := os.Getwd() + if err != nil { + panic(err.Error()) + } + + sep := string(filepath.Separator) + + // Try go.mod file first + modRoot := findModuleRoot(workingDir) + if modRoot != "" { + modName := moduleName(modRoot) + if modName != "" { + cd := strings.Replace(workingDir, modRoot, "", -1) + cd = strings.ReplaceAll(cd, sep, "/") + return modName + cd + } + } + + // Fallback to GOPATH structure + paths := strings.Split(workingDir, sep+"src"+sep) + if len(paths) == 1 { + fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n") + return "UNKNOWN_PACKAGE_PATH" + } + return filepath.ToSlash(paths[len(paths)-1]) +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go new file mode 100644 index 00000000..c3470adb --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go @@ -0,0 +1,41 @@ +package generators + +var specText = `package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + +}) +` + +var agoutiSpecText = `package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + "github.com/sclevine/agouti" + . 
"github.com/sclevine/agouti/matchers" + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + var page *agouti.Page + + {{.GinkgoPackage}}BeforeEach(func() { + var err error + page, err = agoutiDriver.NewPage() + {{.GomegaPackage}}Expect(err).NotTo({{.GomegaPackage}}HaveOccurred()) + }) + + {{.GinkgoPackage}}AfterEach(func() { + {{.GomegaPackage}}Expect(page.Destroy()).To({{.GomegaPackage}}Succeed()) + }) +}) +` diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go new file mode 100644 index 00000000..9da13a0d --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go @@ -0,0 +1,63 @@ +package generators + +import ( + "go/build" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +type GeneratorsConfig struct { + Agouti, NoDot, Internal bool + CustomTemplate string +} + +func getPackageAndFormattedName() (string, string, string) { + path, err := os.Getwd() + command.AbortIfError("Could not get current working directory:", err) + + dirName := strings.Replace(filepath.Base(path), "-", "_", -1) + dirName = strings.Replace(dirName, " ", "_", -1) + + pkg, err := build.ImportDir(path, 0) + packageName := pkg.Name + if err != nil { + packageName = ensureLegalPackageName(dirName) + } + + formattedName := prettifyName(filepath.Base(path)) + return packageName, dirName, formattedName +} + +func ensureLegalPackageName(name string) string { + if name == "_" { + return "underscore" + } + if len(name) == 0 { + return "empty" + } + n, isDigitErr := strconv.Atoi(string(name[0])) + if isDigitErr == nil { + return []string{"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}[n] + name[1:] + } + return name +} + +func prettifyName(name string) string { + name = strings.Replace(name, "-", " ", -1) + name = strings.Replace(name, "_", " ", -1) + name = strings.Title(name) + name = strings.Replace(name, " ", "", -1) + return name +} + +func determinePackageName(name string, internal bool) string { + if internal { + return name + } + + return name + "_test" +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go new file mode 100644 index 00000000..496ec4a2 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -0,0 +1,152 @@ +package internal + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite { + if suite.PathToCompiledTest != "" { + return suite + } + + suite.CompilationError = nil + + path, err := filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test")) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compute compilation target path:\n%s", err.Error()) + return suite + } + + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./") + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) + return suite + } + + cmd := exec.Command("go", args...) 
+ cmd.Dir = suite.Path + output, err := cmd.CombinedOutput() + if err != nil { + if len(output) > 0 { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\n\n%s", suite.PackageName, output) + } else { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s\n%s", suite.PackageName, err.Error()) + } + return suite + } + + if strings.Contains(string(output), "[no test files]") { + suite.State = TestSuiteStateSkippedDueToEmptyCompilation + return suite + } + + if len(output) > 0 { + fmt.Println(string(output)) + } + + if !FileExists(path) { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\nOutput file %s could not be found", suite.PackageName, path) + return suite + } + + suite.State = TestSuiteStateCompiled + suite.PathToCompiledTest = path + return suite +} + +func Cleanup(goFlagsConfig types.GoFlagsConfig, suites ...TestSuite) { + if goFlagsConfig.BinaryMustBePreserved() { + return + } + for _, suite := range suites { + if !suite.Precompiled { + os.Remove(suite.PathToCompiledTest) + } + } +} + +type parallelSuiteBundle struct { + suite TestSuite + compiled chan TestSuite +} + +type OrderedParallelCompiler struct { + mutex *sync.Mutex + stopped bool + numCompilers int + + idx int + numSuites int + completionChannels []chan TestSuite +} + +func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler { + return &OrderedParallelCompiler{ + mutex: &sync.Mutex{}, + numCompilers: numCompilers, + } +} + +func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) { + opc.stopped = false + opc.idx = 0 + opc.numSuites = len(suites) + opc.completionChannels = make([]chan TestSuite, opc.numSuites) + + toCompile := make(chan parallelSuiteBundle, opc.numCompilers) + for compiler := 0; compiler < opc.numCompilers; compiler++ { + go func() { + for bundle := range toCompile { + c, suite := bundle.compiled, bundle.suite + opc.mutex.Lock() + stopped := opc.stopped + opc.mutex.Unlock() + if !stopped { + suite = CompileSuite(suite, goFlagsConfig) + } + c <- suite + } + }() + } + + for idx, suite := range suites { + opc.completionChannels[idx] = make(chan TestSuite, 1) + toCompile <- parallelSuiteBundle{suite, opc.completionChannels[idx]} + if idx == 0 { //compile first suite serially + suite = <-opc.completionChannels[0] + opc.completionChannels[0] <- suite + } + } + + close(toCompile) +} + +func (opc *OrderedParallelCompiler) Next() (int, TestSuite) { + if opc.idx >= opc.numSuites { + return opc.numSuites, TestSuite{} + } + + idx := opc.idx + suite := <-opc.completionChannels[idx] + opc.idx = opc.idx + 1 + + return idx, suite +} + +func (opc *OrderedParallelCompiler) StopAndDrain() { + opc.mutex.Lock() + opc.stopped = true + opc.mutex.Unlock() +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go new file mode 100644 index 00000000..bd3c6d02 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -0,0 +1,237 @@ +package internal + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + + "github.com/google/pprof/profile" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process 
int) string { + suffix := "" + if process != 0 { + suffix = fmt.Sprintf(".%d", process) + } + if cliConfig.OutputDir == "" { + return filepath.Join(suite.AbsPath(), assetName+suffix) + } + outputDir, _ := filepath.Abs(cliConfig.OutputDir) + return filepath.Join(outputDir, suite.NamespacedName()+"_"+assetName+suffix) +} + +func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIConfig, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig, goFlagsConfig types.GoFlagsConfig) ([]string, error) { + messages := []string{} + suitesWithProfiles := suites.WithState(TestSuiteStatePassed, TestSuiteStateFailed) //anything else won't have actually run and generated a profile + + // merge cover profiles if need be + if goFlagsConfig.Cover && !cliConfig.KeepSeparateCoverprofiles { + coverProfiles := []string{} + for _, suite := range suitesWithProfiles { + if !suite.HasProgrammaticFocus { + coverProfiles = append(coverProfiles, AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)) + } + } + + if len(coverProfiles) > 0 { + dst := goFlagsConfig.CoverProfile + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, goFlagsConfig.CoverProfile) + } + err := MergeAndCleanupCoverProfiles(coverProfiles, dst) + if err != nil { + return messages, err + } + coverage, err := GetCoverageFromCoverProfile(dst) + if err != nil { + return messages, err + } + if coverage == 0 { + messages = append(messages, "composite coverage: [no statements]") + } else if suitesWithProfiles.AnyHaveProgrammaticFocus() { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements however some suites did not contribute because they included programatically focused specs", coverage)) + } else { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements", coverage)) + } + } else { + messages = append(messages, "no composite coverage computed: all suites included programatically focused specs") + } + } + + // copy binaries if need be + for _, suite := range suitesWithProfiles { + if goFlagsConfig.BinaryMustBePreserved() && cliConfig.OutputDir != "" { + src := suite.PathToCompiledTest + dst := filepath.Join(cliConfig.OutputDir, suite.NamespacedName()+".test") + if suite.Precompiled { + if err := CopyFile(src, dst); err != nil { + return messages, err + } + } else { + if err := os.Rename(src, dst); err != nil { + return messages, err + } + } + } + } + + type reportFormat struct { + ReportName string + GenerateFunc func(types.Report, string) error + MergeFunc func([]string, string) ([]string, error) + } + reportFormats := []reportFormat{} + if reporterConfig.JSONReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports}) + } + if reporterConfig.JUnitReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports}) + } + if reporterConfig.TeamcityReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.TeamcityReport, GenerateFunc: reporters.GenerateTeamcityReport, MergeFunc: reporters.MergeAndCleanupTeamcityReports}) + } + + // Generate reports for suites that failed to run + reportableSuites := suites.ThatAreGinkgoSuites() + for _, suite := range reportableSuites.WithState(TestSuiteStateFailedToCompile, 
TestSuiteStateFailedDueToTimeout, TestSuiteStateSkippedDueToPriorFailures, TestSuiteStateSkippedDueToEmptyCompilation) { + report := types.Report{ + SuitePath: suite.AbsPath(), + SuiteConfig: suiteConfig, + SuiteSucceeded: false, + } + switch suite.State { + case TestSuiteStateFailedToCompile: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, suite.CompilationError.Error()) + case TestSuiteStateFailedDueToTimeout: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, TIMEOUT_ELAPSED_FAILURE_REASON) + case TestSuiteStateSkippedDueToPriorFailures: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, PRIOR_FAILURES_FAILURE_REASON) + case TestSuiteStateSkippedDueToEmptyCompilation: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, EMPTY_SKIP_FAILURE_REASON) + report.SuiteSucceeded = true + } + + for _, format := range reportFormats { + format.GenerateFunc(report, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + } + + // Merge reports unless we've been asked to keep them separate + if !cliConfig.KeepSeparateReports { + for _, format := range reportFormats { + reports := []string{} + for _, suite := range reportableSuites { + reports = append(reports, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + dst := format.ReportName + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, format.ReportName) + } + mergeMessages, err := format.MergeFunc(reports, dst) + messages = append(messages, mergeMessages...) + if err != nil { + return messages, err + } + } + } + + return messages, nil +} + +//loads each profile, combines them, deletes them, stores them in destination +func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { + combined := &bytes.Buffer{} + modeRegex := regexp.MustCompile(`^mode: .*\n`) + for i, profile := range profiles { + contents, err := os.ReadFile(profile) + if err != nil { + return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error()) + } + os.Remove(profile) + + // remove the cover mode line from every file + // except the first one + if i > 0 { + contents = modeRegex.ReplaceAll(contents, []byte{}) + } + + _, err = combined.Write(contents) + + // Add a newline to the end of every file if missing. 
+ if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' { + _, err = combined.Write([]byte("\n")) + } + + if err != nil { + return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error()) + } + } + + err := os.WriteFile(destination, combined.Bytes(), 0666) + if err != nil { + return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error()) + } + return nil +} + +func GetCoverageFromCoverProfile(profile string) (float64, error) { + cmd := exec.Command("go", "tool", "cover", "-func", profile) + output, err := cmd.CombinedOutput() + if err != nil { + return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error()) + } + re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`) + matches := re.FindStringSubmatch(string(output)) + if matches == nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage") + } + coverageString := matches[1] + coverage, err := strconv.ParseFloat(coverageString, 64) + if err != nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage: %s", err.Error()) + } + + return coverage, nil +} + +func MergeProfiles(profilePaths []string, destination string) error { + profiles := []*profile.Profile{} + for _, profilePath := range profilePaths { + proFile, err := os.Open(profilePath) + if err != nil { + return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error()) + } + prof, err := profile.Parse(proFile) + if err != nil { + return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) + } + profiles = append(profiles, prof) + os.Remove(profilePath) + } + + mergedProfile, err := profile.Merge(profiles) + if err != nil { + return fmt.Errorf("Could not merge profiles:\n%s", err.Error()) + } + + outFile, err := os.Create(destination) + if err != nil { + return fmt.Errorf("Could not create merged profile %s:\n%s", destination, err.Error()) + } + err = mergedProfile.Write(outFile) + if err != nil { + return fmt.Errorf("Could not write merged profile %s:\n%s", destination, err.Error()) + } + err = outFile.Close() + if err != nil { + return fmt.Errorf("Could not close merged profile %s:\n%s", destination, err.Error()) + } + + return nil +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go new file mode 100644 index 00000000..10058393 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go @@ -0,0 +1,348 @@ +package internal + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "strings" + "syscall" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/internal/parallel_support" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +func RunCompiledSuite(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + suite.State = TestSuiteStateFailed + suite.HasProgrammaticFocus = false + + if suite.PathToCompiledTest == "" { + return suite + } + + if suite.IsGinkgo && cliConfig.ComputedProcs() > 1 { + suite = runParallel(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else if suite.IsGinkgo { + suite = runSerial(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else { + suite = runGoTest(suite, cliConfig, 
goFlagsConfig) + } + runAfterRunHook(cliConfig.AfterRunHook, reporterConfig.NoColor, suite) + return suite +} + +func buildAndStartCommand(suite TestSuite, args []string, pipeToStdout bool) (*exec.Cmd, *bytes.Buffer) { + buf := &bytes.Buffer{} + cmd := exec.Command(suite.PathToCompiledTest, args...) + cmd.Dir = suite.Path + if pipeToStdout { + cmd.Stderr = io.MultiWriter(os.Stdout, buf) + cmd.Stdout = os.Stdout + } else { + cmd.Stderr = buf + cmd.Stdout = buf + } + err := cmd.Start() + command.AbortIfError("Failed to start test suite", err) + + return cmd, buf +} + +func checkForNoTestsWarning(buf *bytes.Buffer) bool { + if strings.Contains(buf.String(), "warning: no tests to run") { + fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`) + return true + } + return false +} + +func runGoTest(suite TestSuite, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) TestSuite { + args, err := types.GenerateGoTestRunArgs(goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + return suite +} + +func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + if goFlagsConfig.Cover { + goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + } + if goFlagsConfig.BlockProfile != "" { + goFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + } + if goFlagsConfig.CPUProfile != "" { + goFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MemProfile != "" { + goFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MutexProfile != "" { + goFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + } + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0) + } + + args, err := types.GenerateGinkgoTestRunArgs(ginkgoConfig, reporterConfig, goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) 
+ + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + suite.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + if suite.HasProgrammaticFocus { + if goFlagsConfig.Cover { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } + if goFlagsConfig.BlockProfile != "" { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } + if goFlagsConfig.CPUProfile != "" { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MemProfile != "" { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MutexProfile != "" { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } + } + + return suite +} + +func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + type procResult struct { + passed bool + hasProgrammaticFocus bool + } + + numProcs := cliConfig.ComputedProcs() + procOutput := make([]*bytes.Buffer, numProcs) + coverProfiles := []string{} + + blockProfiles := []string{} + cpuProfiles := []string{} + memProfiles := []string{} + mutexProfiles := []string{} + + procResults := make(chan procResult) + + server, err := parallel_support.NewServer(numProcs, reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut)) + command.AbortIfError("Failed to start parallel spec server", err) + server.Start() + defer server.Close() + + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0) + } + + for proc := 1; proc <= numProcs; proc++ { + procGinkgoConfig := ginkgoConfig + procGinkgoConfig.ParallelProcess, procGinkgoConfig.ParallelTotal, procGinkgoConfig.ParallelHost = proc, numProcs, server.Address() + + procGoFlagsConfig := goFlagsConfig + if goFlagsConfig.Cover { + procGoFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, proc) + coverProfiles = append(coverProfiles, procGoFlagsConfig.CoverProfile) + } + if goFlagsConfig.BlockProfile != "" { + procGoFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, proc) + blockProfiles = append(blockProfiles, procGoFlagsConfig.BlockProfile) + } + if goFlagsConfig.CPUProfile != "" { + procGoFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, proc) + cpuProfiles = append(cpuProfiles, procGoFlagsConfig.CPUProfile) + } + if goFlagsConfig.MemProfile != "" { + procGoFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, 
cliConfig, proc) + memProfiles = append(memProfiles, procGoFlagsConfig.MemProfile) + } + if goFlagsConfig.MutexProfile != "" { + procGoFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, proc) + mutexProfiles = append(mutexProfiles, procGoFlagsConfig.MutexProfile) + } + + args, err := types.GenerateGinkgoTestRunArgs(procGinkgoConfig, reporterConfig, procGoFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) + + cmd, buf := buildAndStartCommand(suite, args, false) + procOutput[proc-1] = buf + server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() }) + + go func() { + cmd.Wait() + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + procResults <- procResult{ + passed: (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE), + hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE, + } + }() + } + + passed := true + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + result := <-procResults + passed = passed && result.passed + suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus + } + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + select { + case <-server.GetSuiteDone(): + fmt.Println("") + case <-time.After(time.Second): + //one of the nodes never finished reporting to the server. Something must have gone wrong. + fmt.Fprint(formatter.ColorableStdErr, formatter.F("\n{{bold}}{{red}}Ginkgo timed out waiting for all parallel procs to report back{{/}}\n")) + fmt.Fprint(formatter.ColorableStdErr, formatter.F("{{gray}}Test suite:{{/}} %s (%s)\n\n", suite.PackageName, suite.Path)) + fmt.Fprint(formatter.ColorableStdErr, formatter.Fiw(0, formatter.COLS, "This occurs if a parallel process exits before it reports its results to the Ginkgo CLI. The CLI will now print out all the stdout/stderr output it's collected from the running processes. 
However you may not see anything useful in these logs because the individual test processes usually intercept output to stdout/stderr in order to capture it in the spec reports.\n\nYou may want to try rerunning your test suite with {{light-gray}}--output-interceptor-mode=none{{/}} to see additional output here and debug your suite.\n")) + fmt.Fprintln(formatter.ColorableStdErr, " ") + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc)) + fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String())) + } + fmt.Fprintf(os.Stderr, "** End **") + } + + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + output := procOutput[proc-1].String() + if proc == 1 && checkForNoTestsWarning(procOutput[0]) && cliConfig.RequireSuite { + suite.State = TestSuiteStateFailed + } + if strings.Contains(output, "deprecated Ginkgo functionality") { + fmt.Fprintln(os.Stderr, output) + } + } + + if len(coverProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } else { + coverProfile := AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + err := MergeAndCleanupCoverProfiles(coverProfiles, coverProfile) + command.AbortIfError("Failed to combine cover profiles", err) + + coverage, err := GetCoverageFromCoverProfile(coverProfile) + command.AbortIfError("Failed to compute coverage", err) + if coverage == 0 { + fmt.Fprintln(os.Stdout, "coverage: [no statements]") + } else { + fmt.Fprintf(os.Stdout, "coverage: %.1f%% of statements\n", coverage) + } + } + } + if len(blockProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } else { + blockProfile := AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + err := MergeProfiles(blockProfiles, blockProfile) + command.AbortIfError("Failed to combine blockprofiles", err) + } + } + if len(cpuProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } else { + cpuProfile := AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + err := MergeProfiles(cpuProfiles, cpuProfile) + command.AbortIfError("Failed to combine cpuprofiles", err) + } + } + if len(memProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } else { + memProfile := AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + err := MergeProfiles(memProfiles, memProfile) + command.AbortIfError("Failed to combine memprofiles", err) + } + } + if len(mutexProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } else { + mutexProfile := AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + err := MergeProfiles(mutexProfiles, mutexProfile) + command.AbortIfError("Failed to combine mutexprofiles", err) + } + } + + return suite +} + +func runAfterRunHook(command string, noColor bool, suite TestSuite) { + if command == "" { + return + } + f := formatter.NewWithNoColorBool(noColor) + + // Allow for string replacement to pass input to the command + passed := "[FAIL]" + if suite.State.Is(TestSuiteStatePassed) { + 
passed = "[PASS]" + } + command = strings.Replace(command, "(ginkgo-suite-passed)", passed, -1) + command = strings.Replace(command, "(ginkgo-suite-name)", suite.PackageName, -1) + + // Must break command into parts + splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`) + parts := splitArgs.FindAllString(command, -1) + + output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput() + if err != nil { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{red}}{{bold}}After-run-hook failed:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{red}}%s{{/}}", output)) + } else { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{green}}{{bold}}After-run-hook succeeded:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{green}}%s{{/}}", output)) + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go new file mode 100644 index 00000000..64dcb1b7 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go @@ -0,0 +1,283 @@ +package internal + +import ( + "errors" + "math/rand" + "os" + "path" + "path/filepath" + "regexp" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +const TIMEOUT_ELAPSED_FAILURE_REASON = "Suite did not run because the timeout elapsed" +const PRIOR_FAILURES_FAILURE_REASON = "Suite did not run because prior suites failed and --keep-going is not set" +const EMPTY_SKIP_FAILURE_REASON = "Suite did not run go test reported that no test files were found" + +type TestSuiteState uint + +const ( + TestSuiteStateInvalid TestSuiteState = iota + + TestSuiteStateUncompiled + TestSuiteStateCompiled + + TestSuiteStatePassed + + TestSuiteStateSkippedDueToEmptyCompilation + TestSuiteStateSkippedByFilter + TestSuiteStateSkippedDueToPriorFailures + + TestSuiteStateFailed + TestSuiteStateFailedDueToTimeout + TestSuiteStateFailedToCompile +) + +var TestSuiteStateFailureStates = []TestSuiteState{TestSuiteStateFailed, TestSuiteStateFailedDueToTimeout, TestSuiteStateFailedToCompile} + +func (state TestSuiteState) Is(states ...TestSuiteState) bool { + for _, suiteState := range states { + if suiteState == state { + return true + } + } + + return false +} + +type TestSuite struct { + Path string + PackageName string + IsGinkgo bool + + Precompiled bool + PathToCompiledTest string + CompilationError error + + HasProgrammaticFocus bool + State TestSuiteState +} + +func (ts TestSuite) AbsPath() string { + path, _ := filepath.Abs(ts.Path) + return path +} + +func (ts TestSuite) NamespacedName() string { + name := relPath(ts.Path) + name = strings.TrimLeft(name, "."+string(filepath.Separator)) + name = strings.ReplaceAll(name, string(filepath.Separator), "_") + name = strings.ReplaceAll(name, " ", "_") + if name == "" { + return ts.PackageName + } + return name +} + +type TestSuites []TestSuite + +func (ts TestSuites) AnyHaveProgrammaticFocus() bool { + for _, suite := range ts { + if suite.HasProgrammaticFocus { + return true + } + } + + return false +} + +func (ts TestSuites) ThatAreGinkgoSuites() TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.IsGinkgo { + out = append(out, suite) + } + } + return out +} + +func (ts TestSuites) CountWithState(states ...TestSuiteState) int { + n := 0 + for _, suite := range ts { + if suite.State.Is(states...) 
{ + n += 1 + } + } + + return n +} + +func (ts TestSuites) WithState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.State.Is(states...) { + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) WithoutState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if !suite.State.Is(states...) { + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) ShuffledCopy(seed int64) TestSuites { + out := make(TestSuites, len(ts)) + permutation := rand.New(rand.NewSource(seed)).Perm(len(ts)) + for i, j := range permutation { + out[i] = ts[j] + } + return out +} + +func FindSuites(args []string, cliConfig types.CLIConfig, allowPrecompiled bool) TestSuites { + suites := TestSuites{} + + if len(args) > 0 { + for _, arg := range args { + if allowPrecompiled { + suite, err := precompiledTestSuite(arg) + if err == nil { + suites = append(suites, suite) + continue + } + } + recurseForSuite := cliConfig.Recurse + if strings.HasSuffix(arg, "/...") && arg != "/..." { + arg = arg[:len(arg)-4] + recurseForSuite = true + } + suites = append(suites, suitesInDir(arg, recurseForSuite)...) + } + } else { + suites = suitesInDir(".", cliConfig.Recurse) + } + + if cliConfig.SkipPackage != "" { + skipFilters := strings.Split(cliConfig.SkipPackage, ",") + for idx := range suites { + for _, skipFilter := range skipFilters { + if strings.Contains(suites[idx].Path, skipFilter) { + suites[idx].State = TestSuiteStateSkippedByFilter + break + } + } + } + } + + return suites +} + +func precompiledTestSuite(path string) (TestSuite, error) { + info, err := os.Stat(path) + if err != nil { + return TestSuite{}, err + } + + if info.IsDir() { + return TestSuite{}, errors.New("this is a directory, not a file") + } + + if filepath.Ext(path) != ".test" && filepath.Ext(path) != ".exe" { + return TestSuite{}, errors.New("this is not a .test binary") + } + + if filepath.Ext(path) == ".test" && info.Mode()&0111 == 0 { + return TestSuite{}, errors.New("this is not executable") + } + + dir := relPath(filepath.Dir(path)) + packageName := strings.TrimSuffix(filepath.Base(path), ".exe") + packageName = strings.TrimSuffix(packageName, ".test") + + path, err = filepath.Abs(path) + if err != nil { + return TestSuite{}, err + } + + return TestSuite{ + Path: dir, + PackageName: packageName, + IsGinkgo: true, + Precompiled: true, + PathToCompiledTest: path, + State: TestSuiteStateCompiled, + }, nil +} + +func suitesInDir(dir string, recurse bool) TestSuites { + suites := TestSuites{} + + if path.Base(dir) == "vendor" { + return suites + } + + files, _ := os.ReadDir(dir) + re := regexp.MustCompile(`^[^._].*_test\.go$`) + for _, file := range files { + if !file.IsDir() && re.Match([]byte(file.Name())) { + suite := TestSuite{ + Path: relPath(dir), + PackageName: packageNameForSuite(dir), + IsGinkgo: filesHaveGinkgoSuite(dir, files), + State: TestSuiteStateUncompiled, + } + suites = append(suites, suite) + break + } + } + + if recurse { + re = regexp.MustCompile(`^[._]`) + for _, file := range files { + if file.IsDir() && !re.Match([]byte(file.Name())) { + suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...) + } + } + } + + return suites +} + +func relPath(dir string) string { + dir, _ = filepath.Abs(dir) + cwd, _ := os.Getwd() + dir, _ = filepath.Rel(cwd, filepath.Clean(dir)) + + if string(dir[0]) != "." { + dir = "." 
+ string(filepath.Separator) + dir + } + + return dir +} + +func packageNameForSuite(dir string) string { + path, _ := filepath.Abs(dir) + return filepath.Base(path) +} + +func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool { + reTestFile := regexp.MustCompile(`_test\.go$`) + reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`) + + for _, file := range files { + if !file.IsDir() && reTestFile.Match([]byte(file.Name())) { + contents, _ := os.ReadFile(dir + "/" + file.Name()) + if reGinkgo.Match(contents) { + return true + } + } + } + + return false +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go new file mode 100644 index 00000000..bd9ca7d5 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go @@ -0,0 +1,86 @@ +package internal + +import ( + "fmt" + "io" + "os" + "os/exec" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func FileExists(path string) bool { + _, err := os.Stat(path) + return err == nil +} + +func CopyFile(src string, dest string) error { + srcFile, err := os.Open(src) + if err != nil { + return err + } + + srcStat, err := srcFile.Stat() + if err != nil { + return err + } + + if _, err := os.Stat(dest); err == nil { + os.Remove(dest) + } + + destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, srcStat.Mode()) + if err != nil { + return err + } + + _, err = io.Copy(destFile, srcFile) + if err != nil { + return err + } + + if err := srcFile.Close(); err != nil { + return err + } + return destFile.Close() +} + +func GoFmt(path string) { + out, err := exec.Command("go", "fmt", path).CombinedOutput() + if err != nil { + command.AbortIfError(fmt.Sprintf("Could not fmt:\n%s\n", string(out)), err) + } +} + +func PluralizedWord(singular, plural string, count int) string { + if count == 1 { + return singular + } + return plural +} + +func FailedSuitesReport(suites TestSuites, f formatter.Formatter) string { + out := "" + out += "There were failures detected in the following suites:\n" + + maxPackageNameLength := 0 + for _, suite := range suites.WithState(TestSuiteStateFailureStates...) 
{ + if len(suite.PackageName) > maxPackageNameLength { + maxPackageNameLength = len(suite.PackageName) + } + } + + packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength) + for _, suite := range suites { + switch suite.State { + case TestSuiteStateFailed: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedToCompile: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{magenta}}[Compilation failure]{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedDueToTimeout: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{orange}}[%s]{{/}}\n", suite.PackageName, suite.Path, TIMEOUT_ELAPSED_FAILURE_REASON) + } + } + return out +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go new file mode 100644 index 00000000..6c61f09d --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go @@ -0,0 +1,123 @@ +package labels + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "sort" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/go/ast/inspector" +) + +func BuildLabelsCommand() command.Command { + var cliConfig = types.NewDefaultCLIConfig() + + flags, err := types.BuildLabelsCommandFlagSet(&cliConfig) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "labels", + Usage: "ginkgo labels ", + Flags: flags, + ShortDoc: "List labels detected in the passed-in packages (or the package in the current directory if left blank).", + DocLink: "spec-labels", + Command: func(args []string, _ []string) { + ListLabels(args, cliConfig) + }, + } +} + +func ListLabels(args []string, cliConfig types.CLIConfig) { + suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + for _, suite := range suites { + labels := fetchLabelsFromPackage(suite.Path) + if len(labels) == 0 { + fmt.Printf("%s: No labels found\n", suite.PackageName) + } else { + fmt.Printf("%s: [%s]\n", suite.PackageName, strings.Join(labels, ", ")) + } + } +} + +func fetchLabelsFromPackage(packagePath string) []string { + fset := token.NewFileSet() + parsedPackages, err := parser.ParseDir(fset, packagePath, nil, 0) + command.AbortIfError("Failed to parse package source:", err) + + files := []*ast.File{} + hasTestPackage := false + for key, pkg := range parsedPackages { + if strings.HasSuffix(key, "_test") { + hasTestPackage = true + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + if !hasTestPackage { + for _, pkg := range parsedPackages { + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + + seen := map[string]bool{} + labels := []string{} + ispr := inspector.New(files) + ispr.Preorder([]ast.Node{&ast.CallExpr{}}, func(n ast.Node) { + potentialLabels := fetchLabels(n.(*ast.CallExpr)) + for _, label := range potentialLabels { + if !seen[label] { + seen[label] = true + labels = append(labels, strconv.Quote(label)) + } + } + }) + + sort.Strings(labels) + return labels +} + +func fetchLabels(callExpr *ast.CallExpr) []string { + out := []string{} + switch expr := callExpr.Fun.(type) { + case *ast.Ident: + if expr.Name != "Label" { + return out + } + case *ast.SelectorExpr: + if 
expr.Sel.Name != "Label" { + return out + } + default: + return out + } + for _, arg := range callExpr.Args { + switch expr := arg.(type) { + case *ast.BasicLit: + if expr.Kind == token.STRING { + unquoted, err := strconv.Unquote(expr.Value) + if err != nil { + unquoted = expr.Value + } + validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{}) + if err == nil { + out = append(out, validated) + } + } + } + } + return out +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go new file mode 100644 index 00000000..e9abb27d --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go @@ -0,0 +1,58 @@ +package main + +import ( + "fmt" + "os" + + "github.com/onsi/ginkgo/v2/ginkgo/build" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/generators" + "github.com/onsi/ginkgo/v2/ginkgo/labels" + "github.com/onsi/ginkgo/v2/ginkgo/outline" + "github.com/onsi/ginkgo/v2/ginkgo/run" + "github.com/onsi/ginkgo/v2/ginkgo/unfocus" + "github.com/onsi/ginkgo/v2/ginkgo/watch" + "github.com/onsi/ginkgo/v2/types" +) + +var program command.Program + +func GenerateCommands() []command.Command { + return []command.Command{ + watch.BuildWatchCommand(), + build.BuildBuildCommand(), + generators.BuildBootstrapCommand(), + generators.BuildGenerateCommand(), + labels.BuildLabelsCommand(), + outline.BuildOutlineCommand(), + unfocus.BuildUnfocusCommand(), + BuildVersionCommand(), + } +} + +func main() { + program = command.Program{ + Name: "ginkgo", + Heading: fmt.Sprintf("Ginkgo Version %s", types.VERSION), + Commands: GenerateCommands(), + DefaultCommand: run.BuildRunCommand(), + DeprecatedCommands: []command.DeprecatedCommand{ + {Name: "convert", Deprecation: types.Deprecations.Convert()}, + {Name: "blur", Deprecation: types.Deprecations.Blur()}, + {Name: "nodot", Deprecation: types.Deprecations.Nodot()}, + }, + } + + program.RunAndExit(os.Args) +} + +func BuildVersionCommand() command.Command { + return command.Command{ + Name: "version", + Usage: "ginkgo version", + ShortDoc: "Print Ginkgo's version", + Command: func(_ []string, _ []string) { + fmt.Printf("Ginkgo Version %s\n", types.VERSION) + }, + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go new file mode 100644 index 00000000..c197bb68 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -0,0 +1,218 @@ +package outline + +import ( + "go/ast" + "go/token" + "strconv" +) + +const ( + // undefinedTextAlt is used if the spec/container text cannot be derived + undefinedTextAlt = "undefined" +) + +// ginkgoMetadata holds useful bits of information for every entry in the outline +type ginkgoMetadata struct { + // Name is the spec or container function name, e.g. 
`Describe` or `It` + Name string `json:"name"` + + // Text is the `text` argument passed to specs, and some containers + Text string `json:"text"` + + // Start is the position of first character of the spec or container block + Start int `json:"start"` + + // End is the position of first character immediately after the spec or container block + End int `json:"end"` + + Spec bool `json:"spec"` + Focused bool `json:"focused"` + Pending bool `json:"pending"` +} + +// ginkgoNode is used to construct the outline as a tree +type ginkgoNode struct { + ginkgoMetadata + Nodes []*ginkgoNode `json:"nodes"` +} + +type walkFunc func(n *ginkgoNode) + +func (n *ginkgoNode) PreOrder(f walkFunc) { + f(n) + for _, m := range n.Nodes { + m.PreOrder(f) + } +} + +func (n *ginkgoNode) PostOrder(f walkFunc) { + for _, m := range n.Nodes { + m.PostOrder(f) + } + f(n) +} + +func (n *ginkgoNode) Walk(pre, post walkFunc) { + pre(n) + for _, m := range n.Nodes { + m.Walk(pre, post) + } + post(n) +} + +// PropagateInheritedProperties propagates the Pending and Focused properties +// through the subtree rooted at n. +func (n *ginkgoNode) PropagateInheritedProperties() { + n.PreOrder(func(thisNode *ginkgoNode) { + for _, descendantNode := range thisNode.Nodes { + if thisNode.Pending { + descendantNode.Pending = true + descendantNode.Focused = false + } + if thisNode.Focused && !descendantNode.Pending { + descendantNode.Focused = true + } + } + }) +} + +// BackpropagateUnfocus propagates the Focused property through the subtree +// rooted at n. It applies the rule described in the Ginkgo docs: +// > Nested programmatically focused specs follow a simple rule: if a +// > leaf-node is marked focused, any of its ancestor nodes that are marked +// > focus will be unfocused. +func (n *ginkgoNode) BackpropagateUnfocus() { + focusedSpecInSubtreeStack := []bool{} + n.PostOrder(func(thisNode *ginkgoNode) { + if thisNode.Spec { + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused) + return + } + focusedSpecInSubtree := false + for range thisNode.Nodes { + focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1] + focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1] + } + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree) + if focusedSpecInSubtree { + thisNode.Focused = false + } + }) + +} + +func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) { + switch ex := ce.Fun.(type) { + case *ast.Ident: + return "", ex.Name, true + case *ast.SelectorExpr: + pkgID, ok := ex.X.(*ast.Ident) + if !ok { + return "", "", false + } + // A package identifier is top-level, so Obj must be nil + if pkgID.Obj != nil { + return "", "", false + } + if ex.Sel == nil { + return "", "", false + } + return pkgID.Name, ex.Sel.Name, true + default: + return "", "", false + } +} + +// absoluteOffsetsForNode derives the absolute character offsets of the node start and +// end positions. +func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) { + return fset.PositionFor(n.Pos(), false).Offset, fset.PositionFor(n.End(), false).Offset +} + +// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree +// corresponding to a Ginkgo container or spec. 
+func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName *string) (*ginkgoNode, bool) { + packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce) + if !ok { + return nil, false + } + + n := ginkgoNode{} + n.Name = identName + n.Start, n.End = absoluteOffsetsForNode(fset, ce) + n.Nodes = make([]*ginkgoNode, 0) + switch identName { + case "It", "Specify", "Entry": + n.Spec = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FIt", "FSpecify", "FEntry": + n.Spec = true + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PIt", "PSpecify", "XIt", "XSpecify", "PEntry", "XEntry": + n.Spec = true + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "Context", "Describe", "When", "DescribeTable": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FContext", "FDescribe", "FWhen", "FDescribeTable": + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen", "PDescribeTable", "XDescribeTable": + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "By": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterEach", "BeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "JustAfterEach", "JustBeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterSuite", "BeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "SynchronizedAfterSuite", "SynchronizedBeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + default: + return nil, false + } +} + +// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or +// container. If it cannot derive it, it returns the alt text. +func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string { + text, defined := textFromCallExpr(ce) + if !defined { + return alt + } + return text +} + +// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container. If +// it cannot derive it, it returns false. +func textFromCallExpr(ce *ast.CallExpr) (string, bool) { + if len(ce.Args) < 1 { + return "", false + } + text, ok := ce.Args[0].(*ast.BasicLit) + if !ok { + return "", false + } + switch text.Kind { + case token.CHAR, token.STRING: + // For token.CHAR and token.STRING, Value is quoted + unquoted, err := strconv.Unquote(text.Value) + if err != nil { + // If unquoting fails, just use the raw Value + return text.Value, true + } + return unquoted, true + default: + return text.Value, true + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go new file mode 100644 index 00000000..4328ab39 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go @@ -0,0 +1,65 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Most of the required functions were available in the +// "golang.org/x/tools/go/ast/astutil" package, but not exported. +// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go + +package outline + +import ( + "go/ast" + "strconv" + "strings" +) + +// packageNameForImport returns the package name for the package. If the package +// is not imported, it returns nil. "Package name" refers to `pkgname` in the +// call expression `pkgname.ExportedIdentifier`. Examples: +// (import path not found) -> nil +// "import example.com/pkg/foo" -> "foo" +// "import fooalias example.com/pkg/foo" -> "fooalias" +// "import . example.com/pkg/foo" -> "" +func packageNameForImport(f *ast.File, path string) *string { + spec := importSpec(f, path) + if spec == nil { + return nil + } + name := spec.Name.String() + if name == "" { + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + } + if name == "." { + name = "" + } + return &name +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go new file mode 100644 index 00000000..4b45e762 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go @@ -0,0 +1,103 @@ +package outline + +import ( + "encoding/json" + "fmt" + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/ast/inspector" +) + +const ( + // ginkgoImportPath is the well-known ginkgo import path + ginkgoImportPath = "github.com/onsi/ginkgo/v2" +) + +// FromASTFile returns an outline for a Ginkgo test source file +func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) { + ginkgoPackageName := packageNameForImport(src, ginkgoImportPath) + if ginkgoPackageName == nil { + return nil, fmt.Errorf("file does not import %q", ginkgoImportPath) + } + + root := ginkgoNode{} + stack := []*ginkgoNode{&root} + ispr := inspector.New([]*ast.File{src}) + ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool { + if push { + // Pre-order traversal + ce, ok := node.(*ast.CallExpr) + if !ok { + // Because `Nodes` calls this function only when the node is an + // ast.CallExpr, this should never happen + panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End())) + } + gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName) + if !ok { + // Node is not a Ginkgo spec or container, continue + return true + } + parent := stack[len(stack)-1] + parent.Nodes = append(parent.Nodes, gn) + stack = append(stack, gn) + return true + } + // Post-order traversal + start, end := absoluteOffsetsForNode(fset, node) + lastVisitedGinkgoNode := stack[len(stack)-1] + if start != 
lastVisitedGinkgoNode.Start || end != lastVisitedGinkgoNode.End { + // Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue + return true + } + stack = stack[0 : len(stack)-1] + return true + }) + if len(root.Nodes) == 0 { + return &outline{[]*ginkgoNode{}}, nil + } + + // Derive the final focused property for all nodes. This must be done + // _before_ propagating the inherited focused property. + root.BackpropagateUnfocus() + // Now, propagate inherited properties, including focused and pending. + root.PropagateInheritedProperties() + + return &outline{root.Nodes}, nil +} + +type outline struct { + Nodes []*ginkgoNode `json:"nodes"` +} + +func (o *outline) MarshalJSON() ([]byte, error) { + return json.Marshal(o.Nodes) +} + +// String returns a CSV-formatted outline. Spec or container are output in +// depth-first order. +func (o *outline) String() string { + return o.StringIndent(0) +} + +// StringIndent returns a CSV-formated outline, but every line is indented by +// one 'width' of spaces for every level of nesting. +func (o *outline) StringIndent(width int) string { + var b strings.Builder + b.WriteString("Name,Text,Start,End,Spec,Focused,Pending\n") + + currentIndent := 0 + pre := func(n *ginkgoNode) { + b.WriteString(fmt.Sprintf("%*s", currentIndent, "")) + b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending)) + currentIndent += width + } + post := func(n *ginkgoNode) { + currentIndent -= width + } + for _, n := range o.Nodes { + n.Walk(pre, post) + } + return b.String() +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go new file mode 100644 index 00000000..36698d46 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go @@ -0,0 +1,98 @@ +package outline + +import ( + "encoding/json" + "fmt" + "go/parser" + "go/token" + "os" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/types" +) + +const ( + // indentWidth is the width used by the 'indent' output + indentWidth = 4 + // stdinAlias is a portable alias for stdin. This convention is used in + // other CLIs, e.g., kubectl. 
+ stdinAlias = "-" + usageCommand = "ginkgo outline " +) + +type outlineConfig struct { + Format string +} + +func BuildOutlineCommand() command.Command { + conf := outlineConfig{ + Format: "csv", + } + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "format", KeyPath: "Format", + Usage: "Format of outline", + UsageArgument: "one of 'csv', 'indent', or 'json'", + UsageDefaultValue: conf.Format, + }, + }, + &conf, + types.GinkgoFlagSections{}, + ) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "outline", + Usage: "ginkgo outline ", + ShortDoc: "Create an outline of Ginkgo symbols for a file", + Documentation: "To read from stdin, use: `ginkgo outline -`", + DocLink: "creating-an-outline-of-specs", + Flags: flags, + Command: func(args []string, _ []string) { + outlineFile(args, conf.Format) + }, + } +} + +func outlineFile(args []string, format string) { + if len(args) != 1 { + command.AbortWithUsage("outline expects exactly one argument") + } + + filename := args[0] + var src *os.File + if filename == stdinAlias { + src = os.Stdin + } else { + var err error + src, err = os.Open(filename) + command.AbortIfError("Failed to open file:", err) + } + + fset := token.NewFileSet() + + parsedSrc, err := parser.ParseFile(fset, filename, src, 0) + command.AbortIfError("Failed to parse source:", err) + + o, err := FromASTFile(fset, parsedSrc) + command.AbortIfError("Failed to create outline:", err) + + var oerr error + switch format { + case "csv": + _, oerr = fmt.Print(o) + case "indent": + _, oerr = fmt.Print(o.StringIndent(indentWidth)) + case "json": + b, err := json.Marshal(o) + if err != nil { + println(fmt.Sprintf("error marshalling to json: %s", err)) + } + _, oerr = fmt.Println(string(b)) + default: + command.AbortWith("Format %s not accepted", format) + } + command.AbortIfError("Failed to write outline:", oerr) +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go new file mode 100644 index 00000000..8ee0acc8 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go @@ -0,0 +1,230 @@ +package run + +import ( + "fmt" + "os" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildRunCommand() command.Command { + var suiteConfig = types.NewDefaultSuiteConfig() + var reporterConfig = types.NewDefaultReporterConfig() + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildRunCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + + interruptHandler := interrupt_handler.NewInterruptHandler(0, nil) + interrupt_handler.SwallowSigQuit() + + return command.Command{ + Name: "run", + Flags: flags, + Usage: "ginkgo run -- ", + ShortDoc: "Run the tests in the passed in (or the package in the current directory if left blank)", + Documentation: "Any arguments after -- will be passed to the test.", + DocLink: "running-tests", + Command: func(args []string, additionalArgs []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + runner := &SpecRunner{ + cliConfig: cliConfig, + 
goFlagsConfig: goFlagsConfig, + suiteConfig: suiteConfig, + reporterConfig: reporterConfig, + flags: flags, + + interruptHandler: interruptHandler, + } + + runner.RunSpecs(args, additionalArgs) + }, + } +} + +type SpecRunner struct { + suiteConfig types.SuiteConfig + reporterConfig types.ReporterConfig + cliConfig types.CLIConfig + goFlagsConfig types.GoFlagsConfig + flags types.GinkgoFlagSet + + interruptHandler *interrupt_handler.InterruptHandler +} + +func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) { + suites := internal.FindSuites(args, r.cliConfig, true) + skippedSuites := suites.WithState(internal.TestSuiteStateSkippedByFilter) + suites = suites.WithoutState(internal.TestSuiteStateSkippedByFilter) + + if len(skippedSuites) > 0 { + fmt.Println("Will skip:") + for _, skippedSuite := range skippedSuites { + fmt.Println(" " + skippedSuite.Path) + } + } + + if len(skippedSuites) > 0 && len(suites) == 0 { + command.AbortGracefullyWith("All tests skipped! Exiting...") + } + + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + if len(suites) > 1 && !r.flags.WasSet("succinct") && r.reporterConfig.Verbosity().LT(types.VerbosityLevelVerbose) { + r.reporterConfig.Succinct = true + } + + t := time.Now() + var endTime time.Time + if r.suiteConfig.Timeout > 0 { + endTime = t.Add(r.suiteConfig.Timeout) + } + + iteration := 0 +OUTER_LOOP: + for { + if !r.flags.WasSet("seed") { + r.suiteConfig.RandomSeed = time.Now().Unix() + } + if r.cliConfig.RandomizeSuites && len(suites) > 1 { + suites = suites.ShuffledCopy(r.suiteConfig.RandomSeed) + } + + opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers()) + opc.StartCompiling(suites, r.goFlagsConfig) + + SUITE_LOOP: + for { + suiteIdx, suite := opc.Next() + if suiteIdx >= len(suites) { + break SUITE_LOOP + } + suites[suiteIdx] = suite + + if r.interruptHandler.Status().Interrupted { + opc.StopAndDrain() + break OUTER_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateSkippedDueToEmptyCompilation) { + fmt.Printf("Skipping %s (no test files)\n", suite.Path) + continue SUITE_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suites[suiteIdx].CompilationError.Error()) + if !r.cliConfig.KeepGoing { + opc.StopAndDrain() + } + continue SUITE_LOOP + } + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 && !r.cliConfig.KeepGoing { + suites[suiteIdx].State = internal.TestSuiteStateSkippedDueToPriorFailures + opc.StopAndDrain() + continue SUITE_LOOP + } + + if !endTime.IsZero() { + r.suiteConfig.Timeout = endTime.Sub(time.Now()) + if r.suiteConfig.Timeout <= 0 { + suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout + opc.StopAndDrain() + continue SUITE_LOOP + } + } + + suites[suiteIdx] = internal.RunCompiledSuite(suites[suiteIdx], r.suiteConfig, r.reporterConfig, r.cliConfig, r.goFlagsConfig, additionalArgs) + } + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) 
> 0 { + if iteration > 0 { + fmt.Printf("\nTests failed on attempt #%d\n\n", iteration+1) + } + break OUTER_LOOP + } + + if r.cliConfig.UntilItFails { + fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration+1, orcMessage(iteration+1)) + } else if r.cliConfig.Repeat > 0 && iteration < r.cliConfig.Repeat { + fmt.Printf("\nAll tests passed...\nThis was attempt %d of %d.\n", iteration+1, r.cliConfig.Repeat+1) + } else { + break OUTER_LOOP + } + iteration += 1 + } + + internal.Cleanup(r.goFlagsConfig, suites...) + + messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, r.cliConfig, r.suiteConfig, r.reporterConfig, r.goFlagsConfig) + command.AbortIfError("could not finalize profiles:", err) + for _, message := range messages { + fmt.Println(message) + } + + fmt.Printf("\nGinkgo ran %d %s in %s\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), time.Since(t)) + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) == 0 { + if suites.AnyHaveProgrammaticFocus() && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { + fmt.Printf("Test Suite Passed\n") + fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE) + command.Abort(command.AbortDetails{ExitCode: types.GINKGO_FOCUS_EXIT_CODE}) + } else { + fmt.Printf("Test Suite Passed\n") + command.Abort(command.AbortDetails{}) + } + } else { + fmt.Fprintln(formatter.ColorableStdOut, "") + if len(suites) > 1 && suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + fmt.Fprintln(formatter.ColorableStdOut, + internal.FailedSuitesReport(suites, formatter.NewWithNoColorBool(r.reporterConfig.NoColor))) + } + fmt.Printf("Test Suite Failed\n") + command.Abort(command.AbortDetails{ExitCode: 1}) + } +} + +func orcMessage(iteration int) string { + if iteration < 10 { + return "" + } else if iteration < 30 { + return []string{ + "If at first you succeed...", + "...try, try again.", + "Looking good!", + "Still good...", + "I think your tests are fine....", + "Yep, still passing", + "Oh boy, here I go testin' again!", + "Even the gophers are getting bored", + "Did you try -race?", + "Maybe you should stop now?", + "I'm getting tired...", + "What if I just made you a sandwich?", + "Hit ^C, hit ^C, please hit ^C", + "Make it stop. Please!", + "Come on! Enough is enough!", + "Dave, this conversation can serve no purpose anymore. Goodbye.", + "Just what do you think you're doing, Dave? ", + "I, Sisyphus", + "Insanity: doing the same thing over and over again and expecting different results. -Einstein", + "I guess Einstein never tried to churn butter", + }[iteration-10] + "\n" + } else { + return "No, seriously... 
you can probably stop now.\n" + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go new file mode 100644 index 00000000..7dd29439 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go @@ -0,0 +1,186 @@ +package unfocus + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func BuildUnfocusCommand() command.Command { + return command.Command{ + Name: "unfocus", + Usage: "ginkgo unfocus", + ShortDoc: "Recursively unfocus any focused tests under the current directory", + DocLink: "filtering-specs", + Command: func(_ []string, _ []string) { + unfocusSpecs() + }, + } +} + +func unfocusSpecs() { + fmt.Println("Scanning for focus...") + + goFiles := make(chan string) + go func() { + unfocusDir(goFiles, ".") + close(goFiles) + }() + + const workers = 10 + wg := sync.WaitGroup{} + wg.Add(workers) + + for i := 0; i < workers; i++ { + go func() { + for path := range goFiles { + unfocusFile(path) + } + wg.Done() + }() + } + + wg.Wait() +} + +func unfocusDir(goFiles chan string, path string) { + files, err := os.ReadDir(path) + if err != nil { + fmt.Println(err.Error()) + return + } + + for _, f := range files { + switch { + case f.IsDir() && shouldProcessDir(f.Name()): + unfocusDir(goFiles, filepath.Join(path, f.Name())) + case !f.IsDir() && shouldProcessFile(f.Name()): + goFiles <- filepath.Join(path, f.Name()) + } + } +} + +func shouldProcessDir(basename string) bool { + return basename != "vendor" && !strings.HasPrefix(basename, ".") +} + +func shouldProcessFile(basename string) bool { + return strings.HasSuffix(basename, ".go") +} + +func unfocusFile(path string) { + data, err := os.ReadFile(path) + if err != nil { + fmt.Printf("error reading file '%s': %s\n", path, err.Error()) + return + } + + ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), parser.ParseComments) + if err != nil { + fmt.Printf("error parsing file '%s': %s\n", path, err.Error()) + return + } + + eliminations := scanForFocus(ast) + if len(eliminations) == 0 { + return + } + + fmt.Printf("...updating %s\n", path) + backup, err := writeBackup(path, data) + if err != nil { + fmt.Printf("error creating backup file: %s\n", err.Error()) + return + } + + if err := updateFile(path, data, eliminations); err != nil { + fmt.Printf("error writing file '%s': %s\n", path, err.Error()) + return + } + + os.Remove(backup) +} + +func writeBackup(path string, data []byte) (string, error) { + t, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path)) + + if err != nil { + return "", fmt.Errorf("error creating temporary file: %w", err) + } + defer t.Close() + + if _, err := io.Copy(t, bytes.NewReader(data)); err != nil { + return "", fmt.Errorf("error writing to temporary file: %w", err) + } + + return t.Name(), nil +} + +func updateFile(path string, data []byte, eliminations [][]int64) error { + to, err := os.Create(path) + if err != nil { + return fmt.Errorf("error opening file for writing '%s': %w\n", path, err) + } + defer to.Close() + + from := bytes.NewReader(data) + var cursor int64 + for _, eliminationRange := range eliminations { + positionToEliminate, lengthToEliminate := eliminationRange[0]-1, eliminationRange[1] + if _, err := io.CopyN(to, from, positionToEliminate-cursor); err != nil { + return fmt.Errorf("error copying data: %w", err) + } + 
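+ // Skip the focus marker being removed: move the cursor past the eliminated range and seek the source reader forward by the same length.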
+ cursor = positionToEliminate + lengthToEliminate + + if _, err := from.Seek(lengthToEliminate, io.SeekCurrent); err != nil { + return fmt.Errorf("error seeking to position in buffer: %w", err) + } + } + + if _, err := io.Copy(to, from); err != nil { + return fmt.Errorf("error copying end data: %w", err) + } + + return nil +} + +func scanForFocus(file *ast.File) (eliminations [][]int64) { + ast.Inspect(file, func(n ast.Node) bool { + if c, ok := n.(*ast.CallExpr); ok { + if i, ok := c.Fun.(*ast.Ident); ok { + if isFocus(i.Name) { + eliminations = append(eliminations, []int64{int64(i.Pos()), 1}) + } + } + } + + if i, ok := n.(*ast.Ident); ok { + if i.Name == "Focus" { + eliminations = append(eliminations, []int64{int64(i.Pos()), 6}) + } + } + + return true + }) + + return eliminations +} + +func isFocus(name string) bool { + switch name { + case "FDescribe", "FContext", "FIt", "FDescribeTable", "FEntry", "FSpecify", "FWhen": + return true + default: + return false + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go new file mode 100644 index 00000000..6c485c5b --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go @@ -0,0 +1,22 @@ +package watch + +import "sort" + +type Delta struct { + ModifiedPackages []string + + NewSuites []*Suite + RemovedSuites []*Suite + modifiedSuites []*Suite +} + +type DescendingByDelta []*Suite + +func (a DescendingByDelta) Len() int { return len(a) } +func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() } + +func (d Delta) ModifiedSuites() []*Suite { + sort.Sort(DescendingByDelta(d.modifiedSuites)) + return d.modifiedSuites +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go new file mode 100644 index 00000000..26418ac6 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go @@ -0,0 +1,75 @@ +package watch + +import ( + "fmt" + + "regexp" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type SuiteErrors map[internal.TestSuite]error + +type DeltaTracker struct { + maxDepth int + watchRegExp *regexp.Regexp + suites map[string]*Suite + packageHashes *PackageHashes +} + +func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker { + return &DeltaTracker{ + maxDepth: maxDepth, + watchRegExp: watchRegExp, + packageHashes: NewPackageHashes(watchRegExp), + suites: map[string]*Suite{}, + } +} + +func (d *DeltaTracker) Delta(suites internal.TestSuites) (delta Delta, errors SuiteErrors) { + errors = SuiteErrors{} + delta.ModifiedPackages = d.packageHashes.CheckForChanges() + + providedSuitePaths := map[string]bool{} + for _, suite := range suites { + providedSuitePaths[suite.Path] = true + } + + d.packageHashes.StartTrackingUsage() + + for _, suite := range d.suites { + if providedSuitePaths[suite.Suite.Path] { + if suite.Delta() > 0 { + delta.modifiedSuites = append(delta.modifiedSuites, suite) + } + } else { + delta.RemovedSuites = append(delta.RemovedSuites, suite) + } + } + + d.packageHashes.StopTrackingUsageAndPrune() + + for _, suite := range suites { + _, ok := d.suites[suite.Path] + if !ok { + s, err := NewSuite(suite, d.maxDepth, d.packageHashes) + if err != nil { + errors[suite] = err + continue + } + d.suites[suite.Path] = s + delta.NewSuites = append(delta.NewSuites, s) + } + } + + return delta, 
errors +} + +func (d *DeltaTracker) WillRun(suite internal.TestSuite) error { + s, ok := d.suites[suite.Path] + if !ok { + return fmt.Errorf("unknown suite %s", suite.Path) + } + + return s.MarkAsRunAndRecomputedDependencies(d.maxDepth) +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go new file mode 100644 index 00000000..f5ddff30 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go @@ -0,0 +1,92 @@ +package watch + +import ( + "go/build" + "regexp" +) + +var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`) +var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing + +type Dependencies struct { + deps map[string]int +} + +func NewDependencies(path string, maxDepth int) (Dependencies, error) { + d := Dependencies{ + deps: map[string]int{}, + } + + if maxDepth == 0 { + return d, nil + } + + err := d.seedWithDepsForPackageAtPath(path) + if err != nil { + return d, err + } + + for depth := 1; depth < maxDepth; depth++ { + n := len(d.deps) + d.addDepsForDepth(depth) + if n == len(d.deps) { + break + } + } + + return d, nil +} + +func (d Dependencies) Dependencies() map[string]int { + return d.deps +} + +func (d Dependencies) seedWithDepsForPackageAtPath(path string) error { + pkg, err := build.ImportDir(path, 0) + if err != nil { + return err + } + + d.resolveAndAdd(pkg.Imports, 1) + d.resolveAndAdd(pkg.TestImports, 1) + d.resolveAndAdd(pkg.XTestImports, 1) + + delete(d.deps, pkg.Dir) + return nil +} + +func (d Dependencies) addDepsForDepth(depth int) { + for dep, depDepth := range d.deps { + if depDepth == depth { + d.addDepsForDep(dep, depth+1) + } + } +} + +func (d Dependencies) addDepsForDep(dep string, depth int) { + pkg, err := build.ImportDir(dep, 0) + if err != nil { + println(err.Error()) + return + } + d.resolveAndAdd(pkg.Imports, depth) +} + +func (d Dependencies) resolveAndAdd(deps []string, depth int) { + for _, dep := range deps { + pkg, err := build.Import(dep, ".", 0) + if err != nil { + continue + } + if !pkg.Goroot && (!ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) || ginkgoIntegrationTestFilter.Match([]byte(pkg.Dir))) { + d.addDepIfNotPresent(pkg.Dir, depth) + } + } +} + +func (d Dependencies) addDepIfNotPresent(dep string, depth int) { + _, ok := d.deps[dep] + if !ok { + d.deps[dep] = depth + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go new file mode 100644 index 00000000..e9f7ec0c --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -0,0 +1,108 @@ +package watch + +import ( + "fmt" + "os" + "regexp" + "time" +) + +var goTestRegExp = regexp.MustCompile(`_test\.go$`) + +type PackageHash struct { + CodeModifiedTime time.Time + TestModifiedTime time.Time + Deleted bool + + path string + codeHash string + testHash string + watchRegExp *regexp.Regexp +} + +func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash { + p := &PackageHash{ + path: path, + watchRegExp: watchRegExp, + } + + p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes() + + return p +} + +func (p *PackageHash) CheckForChanges() bool { + codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes() + + if deleted { + if !p.Deleted { + t := time.Now() + p.CodeModifiedTime = t + 
p.TestModifiedTime = t + } + p.Deleted = true + return true + } + + modified := false + p.Deleted = false + + if p.codeHash != codeHash { + p.CodeModifiedTime = codeModifiedTime + modified = true + } + if p.testHash != testHash { + p.TestModifiedTime = testModifiedTime + modified = true + } + + p.codeHash = codeHash + p.testHash = testHash + return modified +} + +func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) { + entries, err := os.ReadDir(p.path) + + if err != nil { + deleted = true + return + } + + for _, entry := range entries { + if entry.IsDir() { + continue + } + + info, err := entry.Info() + if err != nil { + continue + } + + if goTestRegExp.Match([]byte(info.Name())) { + testHash += p.hashForFileInfo(info) + if info.ModTime().After(testModifiedTime) { + testModifiedTime = info.ModTime() + } + continue + } + + if p.watchRegExp.Match([]byte(info.Name())) { + codeHash += p.hashForFileInfo(info) + if info.ModTime().After(codeModifiedTime) { + codeModifiedTime = info.ModTime() + } + } + } + + testHash += codeHash + if codeModifiedTime.After(testModifiedTime) { + testModifiedTime = codeModifiedTime + } + + return +} + +func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { + return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go new file mode 100644 index 00000000..b4892beb --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go @@ -0,0 +1,85 @@ +package watch + +import ( + "path/filepath" + "regexp" + "sync" +) + +type PackageHashes struct { + PackageHashes map[string]*PackageHash + usedPaths map[string]bool + watchRegExp *regexp.Regexp + lock *sync.Mutex +} + +func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes { + return &PackageHashes{ + PackageHashes: map[string]*PackageHash{}, + usedPaths: nil, + watchRegExp: watchRegExp, + lock: &sync.Mutex{}, + } +} + +func (p *PackageHashes) CheckForChanges() []string { + p.lock.Lock() + defer p.lock.Unlock() + + modified := []string{} + + for _, packageHash := range p.PackageHashes { + if packageHash.CheckForChanges() { + modified = append(modified, packageHash.path) + } + } + + return modified +} + +func (p *PackageHashes) Add(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + _, ok := p.PackageHashes[path] + if !ok { + p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp) + } + + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) Get(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) StartTrackingUsage() { + p.lock.Lock() + defer p.lock.Unlock() + + p.usedPaths = map[string]bool{} +} + +func (p *PackageHashes) StopTrackingUsageAndPrune() { + p.lock.Lock() + defer p.lock.Unlock() + + for path := range p.PackageHashes { + if !p.usedPaths[path] { + delete(p.PackageHashes, path) + } + } + + p.usedPaths = nil +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go new file mode 100644 index 00000000..53272df7 --- /dev/null +++ 
b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go @@ -0,0 +1,87 @@ +package watch + +import ( + "fmt" + "math" + "time" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type Suite struct { + Suite internal.TestSuite + RunTime time.Time + Dependencies Dependencies + + sharedPackageHashes *PackageHashes +} + +func NewSuite(suite internal.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) { + deps, err := NewDependencies(suite.Path, maxDepth) + if err != nil { + return nil, err + } + + sharedPackageHashes.Add(suite.Path) + for dep := range deps.Dependencies() { + sharedPackageHashes.Add(dep) + } + + return &Suite{ + Suite: suite, + Dependencies: deps, + + sharedPackageHashes: sharedPackageHashes, + }, nil +} + +func (s *Suite) Delta() float64 { + delta := s.delta(s.Suite.Path, true, 0) * 1000 + for dep, depth := range s.Dependencies.Dependencies() { + delta += s.delta(dep, false, depth) + } + return delta +} + +func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error { + s.RunTime = time.Now() + + deps, err := NewDependencies(s.Suite.Path, maxDepth) + if err != nil { + return err + } + + s.sharedPackageHashes.Add(s.Suite.Path) + for dep := range deps.Dependencies() { + s.sharedPackageHashes.Add(dep) + } + + s.Dependencies = deps + + return nil +} + +func (s *Suite) Description() string { + numDeps := len(s.Dependencies.Dependencies()) + pluralizer := "ies" + if numDeps == 1 { + pluralizer = "y" + } + return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer) +} + +func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 { + return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1) +} + +func (s *Suite) dt(packagePath string, includeTests bool) time.Duration { + packageHash := s.sharedPackageHashes.Get(packagePath) + var modifiedTime time.Time + if includeTests { + modifiedTime = packageHash.TestModifiedTime + } else { + modifiedTime = packageHash.CodeModifiedTime + } + + return modifiedTime.Sub(s.RunTime) +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go new file mode 100644 index 00000000..83dbeb1e --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go @@ -0,0 +1,190 @@ +package watch + +import ( + "fmt" + "regexp" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildWatchCommand() command.Command { + var suiteConfig = types.NewDefaultSuiteConfig() + var reporterConfig = types.NewDefaultReporterConfig() + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildWatchCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + interruptHandler := interrupt_handler.NewInterruptHandler(0, nil) + interrupt_handler.SwallowSigQuit() + + return command.Command{ + Name: "watch", + Flags: flags, + Usage: "ginkgo watch -- ", + ShortDoc: "Watch the passed in and runs their tests whenever changes occur.", + Documentation: "Any arguments after -- will be passed to the test.", + DocLink: "watching-for-changes", + Command: func(args []string, additionalArgs []string) { + var errors []error + cliConfig, goFlagsConfig, errors = 
types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + watcher := &SpecWatcher{ + cliConfig: cliConfig, + goFlagsConfig: goFlagsConfig, + suiteConfig: suiteConfig, + reporterConfig: reporterConfig, + flags: flags, + + interruptHandler: interruptHandler, + } + + watcher.WatchSpecs(args, additionalArgs) + }, + } +} + +type SpecWatcher struct { + suiteConfig types.SuiteConfig + reporterConfig types.ReporterConfig + cliConfig types.CLIConfig + goFlagsConfig types.GoFlagsConfig + flags types.GinkgoFlagSet + + interruptHandler *interrupt_handler.InterruptHandler +} + +func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) { + suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), w.cliConfig.Depth) + deltaTracker := NewDeltaTracker(w.cliConfig.Depth, regexp.MustCompile(w.cliConfig.WatchRegExp)) + delta, errors := deltaTracker.Delta(suites) + + fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites))) + for _, suite := range delta.NewSuites { + fmt.Println(" " + suite.Description()) + } + + for suite, err := range errors { + fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err) + } + + if len(suites) == 1 { + w.updateSeed() + w.compileAndRun(suites[0], additionalArgs) + } + + ticker := time.NewTicker(time.Second) + + for { + select { + case <-ticker.C: + suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + delta, _ := deltaTracker.Delta(suites) + coloredStream := formatter.ColorableStdOut + + suites = internal.TestSuites{} + + if len(delta.NewSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected %d new %s:{{/}}", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites)))) + for _, suite := range delta.NewSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + } + + modifiedSuites := delta.ModifiedSuites() + if len(modifiedSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected changes in:{{/}}")) + for _, pkg := range delta.ModifiedPackages { + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", pkg)) + } + fmt.Fprintln(coloredStream, formatter.F("{{green}}Will run %d %s:{{/}}", len(modifiedSuites), internal.PluralizedWord("suite", "suites", len(modifiedSuites)))) + for _, suite := range modifiedSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + fmt.Fprintln(coloredStream, "") + } + + if len(suites) == 0 { + break + } + + w.updateSeed() + w.computeSuccinctMode(len(suites)) + for idx := range suites { + if w.interruptHandler.Status().Interrupted { + return + } + deltaTracker.WillRun(suites[idx]) + suites[idx] = w.compileAndRun(suites[idx], additionalArgs) + } + color := "{{green}}" + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + color = "{{red}}" + } + fmt.Fprintln(coloredStream, formatter.F(color+"\nDone. 
Resuming watch...{{/}}")) + + messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, w.cliConfig, w.suiteConfig, w.reporterConfig, w.goFlagsConfig) + command.AbortIfError("could not finalize profiles:", err) + for _, message := range messages { + fmt.Println(message) + } + case <-w.interruptHandler.Status().Channel: + return + } + } +} + +func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite { + suite = internal.CompileSuite(suite, w.goFlagsConfig) + if suite.State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suite.CompilationError.Error()) + return suite + } + if w.interruptHandler.Status().Interrupted { + return suite + } + suite = internal.RunCompiledSuite(suite, w.suiteConfig, w.reporterConfig, w.cliConfig, w.goFlagsConfig, additionalArgs) + internal.Cleanup(w.goFlagsConfig, suite) + return suite +} + +func (w *SpecWatcher) computeSuccinctMode(numSuites int) { + if w.reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) { + w.reporterConfig.Succinct = false + return + } + + if w.flags.WasSet("succinct") { + return + } + + if numSuites == 1 { + w.reporterConfig.Succinct = false + } + + if numSuites > 1 { + w.reporterConfig.Succinct = true + } +} + +func (w *SpecWatcher) updateSeed() { + if !w.flags.WasSet("seed") { + w.suiteConfig.RandomSeed = time.Now().Unix() + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go new file mode 100644 index 00000000..1beeb114 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -0,0 +1,45 @@ +package ginkgo + +import "github.com/onsi/ginkgo/v2/internal/testingtproxy" + +/* +GinkgoT() implements an interface analogous to *testing.T and can be used with +third-party libraries that accept *testing.T through an interface. + +GinkgoT() takes an optional offset argument that can be used to get the +correct line number associated with the failure. + +You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries +*/ +func GinkgoT(optionalOffset ...int) GinkgoTInterface { + offset := 3 + if len(optionalOffset) > 0 { + offset = optionalOffset[0] + } + return testingtproxy.New(GinkgoWriter, Fail, Skip, DeferCleanup, CurrentSpecReport, offset) +} + +/* +The interface returned by GinkgoT(). This covers most of the methods in the testing package's T. 
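As a quick illustration of how the interface below is typically consumed: anything that accepts a *testing.T-shaped value can be handed GinkgoT(). A minimal sketch, assuming the third-party stretchr/testify assert package (an assumption for illustration; it is not vendored by this patch):

package thirdparty_test

import (
	. "github.com/onsi/ginkgo/v2"
	"github.com/stretchr/testify/assert"
)

var _ = Describe("working with libraries that expect *testing.T", func() {
	It("can be given GinkgoT() instead", func() {
		// If the assertion fails, testify calls Errorf on the value it was given;
		// GinkgoT() forwards that to Ginkgo's Fail, so the failure surfaces as an
		// ordinary spec failure with the correct code location.
		assert.Equal(GinkgoT(), 4, 2+2)
	})
})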
+*/ +type GinkgoTInterface interface { + Cleanup(func()) + Setenv(kev, value string) + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Helper() + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Parallel() + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool + TempDir() string +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/counter.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/counter.go new file mode 100644 index 00000000..712d85af --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/counter.go @@ -0,0 +1,9 @@ +package internal + +func MakeIncrementingIndexCounter() func() (int, error) { + idx := -1 + return func() (int, error) { + idx += 1 + return idx, nil + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/failer.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/failer.go new file mode 100644 index 00000000..e9bd9565 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/failer.go @@ -0,0 +1,99 @@ +package internal + +import ( + "fmt" + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +type Failer struct { + lock *sync.Mutex + failure types.Failure + state types.SpecState +} + +func NewFailer() *Failer { + return &Failer{ + lock: &sync.Mutex{}, + state: types.SpecStatePassed, + } +} + +func (f *Failer) GetState() types.SpecState { + f.lock.Lock() + defer f.lock.Unlock() + return f.state +} + +func (f *Failer) GetFailure() types.Failure { + f.lock.Lock() + defer f.lock.Unlock() + return f.failure +} + +func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStatePanicked + f.failure = types.Failure{ + Message: "Test Panicked", + Location: location, + ForwardedPanic: fmt.Sprintf("%v", forwardedPanic), + } + } +} + +func (f *Failer) Fail(message string, location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateFailed + f.failure = types.Failure{ + Message: message, + Location: location, + } + } +} + +func (f *Failer) Skip(message string, location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateSkipped + f.failure = types.Failure{ + Message: message, + Location: location, + } + } +} + +func (f *Failer) AbortSuite(message string, location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateAborted + f.failure = types.Failure{ + Message: message, + Location: location, + } + } +} + +func (f *Failer) Drain() (types.SpecState, types.Failure) { + f.lock.Lock() + defer f.lock.Unlock() + + failure := f.failure + outcome := f.state + + f.state = types.SpecStatePassed + f.failure = types.Failure{} + + return outcome, failure +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/focus.go new file mode 100644 index 00000000..966ea0c1 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/focus.go @@ -0,0 +1,125 @@ +package internal + +import ( + "regexp" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +/* + If a container marked as focus has a descendant that is also marked as 
focus, Ginkgo's policy is to + unmark the container's focus. This gives developers a more intuitive experience when debugging specs. + It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus - + this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container: + + As a common example, consider: + + FDescribe("something to debug", function() { + It("works", function() {...}) + It("works", function() {...}) + FIt("doesn't work", function() {...}) + It("works", function() {...}) + }) + + here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container. + The nested policy applied by this function enables this behavior. +*/ +func ApplyNestedFocusPolicyToTree(tree *TreeNode) { + var walkTree func(tree *TreeNode) bool + walkTree = func(tree *TreeNode) bool { + if tree.Node.MarkedPending { + return false + } + hasFocusedDescendant := false + for _, child := range tree.Children { + childHasFocus := walkTree(child) + hasFocusedDescendant = hasFocusedDescendant || childHasFocus + } + tree.Node.MarkedFocus = tree.Node.MarkedFocus && !hasFocusedDescendant + return tree.Node.MarkedFocus || hasFocusedDescendant + } + + walkTree(tree) +} + +/* + Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus" + It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text + and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs. + + If any of the CLI flags are provided they take precedence. The file filters run first followed by the regex filters. + + This function sets the `Skip` property on specs by applying Ginkgo's focus policy: + - If there are no CLI arguments and no programmatic focus, do nothing. + - If there are no CLI arguments but a spec somewhere has programmatic focus, skip any specs that have no programmatic focus. + - If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters. + + *Note:* specs with pending nodes are Skipped when created by NewSpec.
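To make the policy described above concrete, here is a small sketch using only the public DSL (illustrative only; it is not part of this file): with none of the `-focus=`, `-skip=`, `-focus-files=`, or `-skip-files=` flags set, the FIt below gives its spec programmatic focus, so the unfocused sibling is reported as skipped; supplying any of those CLI flags takes precedence and programmatic focus no longer drives the skipping.

package focus_example_test

import . "github.com/onsi/ginkgo/v2"

var _ = Describe("focus policy", func() {
	// With programmatic focus present and no CLI filters, only the FIt runs;
	// this sibling is reported as skipped rather than run.
	It("is skipped because a sibling is focused", func() {})

	FIt("is the only spec that runs in that case", func() {})
})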
+*/ +func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) { + focusString := strings.Join(suiteConfig.FocusStrings, "|") + skipString := strings.Join(suiteConfig.SkipStrings, "|") + + hasFocusCLIFlags := focusString != "" || skipString != "" || len(suiteConfig.SkipFiles) > 0 || len(suiteConfig.FocusFiles) > 0 || suiteConfig.LabelFilter != "" + + type SkipCheck func(spec Spec) bool + + // by default, skip any specs marked pending + skipChecks := []SkipCheck{func(spec Spec) bool { return spec.Nodes.HasNodeMarkedPending() }} + hasProgrammaticFocus := false + + if !hasFocusCLIFlags { + // check for programmatic focus + for _, spec := range specs { + if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() { + skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() }) + hasProgrammaticFocus = true + break + } + } + } + + if suiteConfig.LabelFilter != "" { + labelFilter, _ := types.ParseLabelFilter(suiteConfig.LabelFilter) + skipChecks = append(skipChecks, func(spec Spec) bool { + return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels())) + }) + } + + if len(suiteConfig.FocusFiles) > 0 { + focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles) + skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) }) + } + + if len(suiteConfig.SkipFiles) > 0 { + skipFilters, _ := types.ParseFileFilters(suiteConfig.SkipFiles) + skipChecks = append(skipChecks, func(spec Spec) bool { return skipFilters.Matches(spec.Nodes.CodeLocations()) }) + } + + if focusString != "" { + // skip specs that don't match the focus string + re := regexp.MustCompile(focusString) + skipChecks = append(skipChecks, func(spec Spec) bool { return !re.MatchString(description + " " + spec.Text()) }) + } + + if skipString != "" { + // skip specs that match the skip string + re := regexp.MustCompile(skipString) + skipChecks = append(skipChecks, func(spec Spec) bool { return re.MatchString(description + " " + spec.Text()) }) + } + + // skip specs if shouldSkip() is true. note that we do nothing if shouldSkip() is false to avoid overwriting skip status established by the node's pending status + processedSpecs := Specs{} + for _, spec := range specs { + for _, skipCheck := range skipChecks { + if skipCheck(spec) { + spec.Skip = true + break + } + } + processedSpecs = append(processedSpecs, spec) + } + + return processedSpecs, hasProgrammaticFocus +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go new file mode 100644 index 00000000..f2c0fd89 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go @@ -0,0 +1,17 @@ +package global + +import ( + "github.com/onsi/ginkgo/v2/internal" +) + +var Suite *internal.Suite +var Failer *internal.Failer + +func init() { + InitializeGlobals() +} + +func InitializeGlobals() { + Failer = internal.NewFailer() + Suite = internal.NewSuite() +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/group.go new file mode 100644 index 00000000..c6546bba --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/group.go @@ -0,0 +1,544 @@ +package internal + +import ( + "fmt" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type runOncePair struct { + //nodeId should only run once... 
+ nodeID uint + nodeType types.NodeType + //...for specs in a hierarchy that includes this context + containerID uint +} + +func (pair runOncePair) isZero() bool { + return pair.nodeID == 0 +} + +func runOncePairForNode(node Node, containerID uint) runOncePair { + return runOncePair{ + nodeID: node.ID, + nodeType: node.NodeType, + containerID: containerID, + } +} + +type runOncePairs []runOncePair + +func runOncePairsForSpec(spec Spec) runOncePairs { + pairs := runOncePairs{} + + containers := spec.Nodes.WithType(types.NodeTypeContainer) + for _, node := range spec.Nodes { + if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) { + pairs = append(pairs, runOncePairForNode(node, containers.FirstWithNestingLevel(node.NestingLevel-1).ID)) + } else if node.NodeType.Is(types.NodeTypeBeforeEach|types.NodeTypeJustBeforeEach|types.NodeTypeAfterEach|types.NodeTypeJustAfterEach) && node.MarkedOncePerOrdered { + passedIntoAnOrderedContainer := false + firstOrderedContainerDeeperThanNode := containers.FirstSatisfying(func(container Node) bool { + passedIntoAnOrderedContainer = passedIntoAnOrderedContainer || container.MarkedOrdered + return container.NestingLevel >= node.NestingLevel && passedIntoAnOrderedContainer + }) + if firstOrderedContainerDeeperThanNode.IsZero() { + continue + } + pairs = append(pairs, runOncePairForNode(node, firstOrderedContainerDeeperThanNode.ID)) + } + } + + return pairs +} + +func (pairs runOncePairs) runOncePairFor(nodeID uint) runOncePair { + for i := range pairs { + if pairs[i].nodeID == nodeID { + return pairs[i] + } + } + return runOncePair{} +} + +func (pairs runOncePairs) hasRunOncePair(pair runOncePair) bool { + for i := range pairs { + if pairs[i] == pair { + return true + } + } + return false +} + +func (pairs runOncePairs) withType(nodeTypes types.NodeType) runOncePairs { + count := 0 + for i := range pairs { + if pairs[i].nodeType.Is(nodeTypes) { + count++ + } + } + + out, j := make(runOncePairs, count), 0 + for i := range pairs { + if pairs[i].nodeType.Is(nodeTypes) { + out[j] = pairs[i] + j++ + } + } + return out +} + +type group struct { + suite *Suite + specs Specs + runOncePairs map[uint]runOncePairs + runOnceTracker map[runOncePair]types.SpecState + + succeeded bool +} + +func newGroup(suite *Suite) *group { + return &group{ + suite: suite, + runOncePairs: map[uint]runOncePairs{}, + runOnceTracker: map[runOncePair]types.SpecState{}, + succeeded: true, + } +} + +func (g *group) initialReportForSpec(spec Spec) types.SpecReport { + return types.SpecReport{ + ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(), + ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(), + ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(), + LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation, + LeafNodeType: types.NodeTypeIt, + LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, + LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), + ParallelProcess: g.suite.config.ParallelProcess, + IsSerial: spec.Nodes.HasNodeMarkedSerial(), + IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), + } +} + +func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) { + if spec.Nodes.HasNodeMarkedPending() { + return types.SpecStatePending, types.Failure{} + } + if spec.Skip { + return types.SpecStateSkipped, types.Failure{} + } + if g.suite.interruptHandler.Status().Interrupted || g.suite.skipAll { + 
return types.SpecStateSkipped, types.Failure{} + } + if !g.succeeded { + return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), + "Spec skipped because an earlier spec in an ordered container failed") + } + beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach) + for _, pair := range beforeOncePairs { + if g.runOnceTracker[pair].Is(types.SpecStateSkipped) { + return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), + fmt.Sprintf("Spec skipped because Skip() was called in %s", pair.nodeType)) + } + } + if g.suite.config.DryRun { + return types.SpecStatePassed, types.Failure{} + } + return g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure +} + +func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool { + lastSpecID := uint(0) + for idx := range g.specs { + if g.specs[idx].Skip { + continue + } + sID := g.specs[idx].SubjectID() + if g.runOncePairs[sID].hasRunOncePair(pair) { + lastSpecID = sID + } + } + return lastSpecID == specID +} + +func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { + interruptStatus := g.suite.interruptHandler.Status() + + pairs := g.runOncePairs[spec.SubjectID()] + + nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll) + nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel() + nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...) + nodes = append(nodes, spec.Nodes.FirstNodeWithType(types.NodeTypeIt)) + terminatingNode, terminatingPair := Node{}, runOncePair{} + + for _, node := range nodes { + oncePair := pairs.runOncePairFor(node.ID) + if !oncePair.isZero() && g.runOnceTracker[oncePair].Is(types.SpecStatePassed) { + continue + } + g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, interruptStatus.Channel, spec.Nodes.BestTextFor(node)) + g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime) + if !oncePair.isZero() { + g.runOnceTracker[oncePair] = g.suite.currentSpecReport.State + } + if g.suite.currentSpecReport.State != types.SpecStatePassed { + terminatingNode, terminatingPair = node, oncePair + break + } + } + + afterNodeWasRun := map[uint]bool{} + includeDeferCleanups := false + for { + nodes := spec.Nodes.WithType(types.NodeTypeAfterEach) + nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel() + nodes = append(spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel(), nodes...) + if !terminatingNode.IsZero() { + nodes = nodes.WithinNestingLevel(terminatingNode.NestingLevel) + } + if includeDeferCleanups { + nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse()...) + nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse()...) 
+ } + nodes = nodes.Filter(func(node Node) bool { + if afterNodeWasRun[node.ID] { + //this node has already been run on this attempt, don't rerun it + return false + } + pair := runOncePair{} + switch node.NodeType { + case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll: + // check if we were generated in an AfterNode that has already run + if afterNodeWasRun[node.NodeIDWhereCleanupWasGenerated] { + return true // we were, so we should definitely run this cleanup now + } + // looks like this cleanup node was generated by a before node or an It. + // the run-once status of a cleanup node is governed by the run-once status of its generator + pair = pairs.runOncePairFor(node.NodeIDWhereCleanupWasGenerated) + default: + pair = pairs.runOncePairFor(node.ID) + } + if pair.isZero() { + // this node is not governed by any run-once policy, we should run it + return true + } + // it's our last chance to run if we're the last spec for our oncePair + isLastSpecWithPair := g.isLastSpecWithPair(spec.SubjectID(), pair) + + switch g.suite.currentSpecReport.State { + case types.SpecStatePassed: //this attempt is passing... + return isLastSpecWithPair //...we should run the run-once node if this is our last chance + case types.SpecStateSkipped: //the spec was skipped by the user... + if isLastSpecWithPair { + return true //...we're the last spec, so we should run the AfterNode + } + if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel { + return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run + } + case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed... + if isFinalAttempt { + return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run + } + if !terminatingPair.isZero() { // ...and it failed in a run-once node, which will be running again + if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) { + return terminatingNode.ID == node.NodeIDWhereCleanupWasGenerated // we should run this node if we're a clean-up generated by it + } else { + return terminatingNode.NestingLevel == node.NestingLevel // ...or if we're at the same nesting level + } + } + case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted + return true //...that means the test run is over and we should clean up the stack.
Run the AfterNode + } + return false + }) + + if len(nodes) == 0 && includeDeferCleanups { + break + } + + for _, node := range nodes { + afterNodeWasRun[node.ID] = true + state, failure := g.suite.runNode(node, g.suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(node)) + g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime) + if g.suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted { + g.suite.currentSpecReport.State = state + g.suite.currentSpecReport.Failure = failure + } + } + includeDeferCleanups = true + } + +} + +func (g *group) run(specs Specs) { + g.specs = specs + for _, spec := range g.specs { + g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec) + } + + for _, spec := range g.specs { + g.suite.currentSpecReport = g.initialReportForSpec(spec) + g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.evaluateSkipStatus(spec) + g.suite.reporter.WillRun(g.suite.currentSpecReport) + g.suite.reportEach(spec, types.NodeTypeReportBeforeEach) + + skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending) + + g.suite.currentSpecReport.StartTime = time.Now() + if !skip { + maxAttempts := max(1, spec.FlakeAttempts()) + if g.suite.config.FlakeAttempts > 0 { + maxAttempts = g.suite.config.FlakeAttempts + } + for attempt := 0; attempt < maxAttempts; attempt++ { + g.suite.currentSpecReport.NumAttempts = attempt + 1 + g.suite.writer.Truncate() + g.suite.outputInterceptor.StartInterceptingOutput() + if attempt > 0 { + fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt) + } + + g.attemptSpec(attempt == maxAttempts-1, spec) + + g.suite.currentSpecReport.EndTime = time.Now() + g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime) + g.suite.currentSpecReport.CapturedGinkgoWriterOutput += string(g.suite.writer.Bytes()) + g.suite.currentSpecReport.CapturedStdOutErr += g.suite.outputInterceptor.StopInterceptingAndReturnOutput() + + if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) { + break + } + } + } + + g.suite.reportEach(spec, types.NodeTypeReportAfterEach) + g.suite.processCurrentSpecReport() + if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { + g.succeeded = false + } + g.suite.currentSpecReport = types.SpecReport{} + } +} + +func (g *group) oldRun(specs Specs) { + var suite = g.suite + nodeState := map[uint]types.SpecState{} + groupSucceeded := true + + indexOfLastSpecContainingNodeID := func(id uint) int { + lastIdx := -1 + for idx := range specs { + if specs[idx].Nodes.ContainsNodeID(id) && !specs[idx].Skip { + lastIdx = idx + } + } + return lastIdx + } + + for i, spec := range specs { + suite.currentSpecReport = types.SpecReport{ + ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(), + ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(), + ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(), + LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation, + LeafNodeType: types.NodeTypeIt, + LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, + LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), + ParallelProcess: suite.config.ParallelProcess, + IsSerial: 
spec.Nodes.HasNodeMarkedSerial(), + IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), + } + + skip := spec.Skip + if spec.Nodes.HasNodeMarkedPending() { + skip = true + suite.currentSpecReport.State = types.SpecStatePending + } else { + if suite.interruptHandler.Status().Interrupted || suite.skipAll { + skip = true + } + if !groupSucceeded { + skip = true + suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), + "Spec skipped because an earlier spec in an ordered container failed") + } + for _, node := range spec.Nodes.WithType(types.NodeTypeBeforeAll) { + if nodeState[node.ID] == types.SpecStateSkipped { + skip = true + suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), + "Spec skipped because Skip() was called in BeforeAll") + break + } + } + if skip { + suite.currentSpecReport.State = types.SpecStateSkipped + } + } + + if suite.config.DryRun && !skip { + skip = true + suite.currentSpecReport.State = types.SpecStatePassed + } + + suite.reporter.WillRun(suite.currentSpecReport) + //send the spec report to any attached ReportBeforeEach blocks - this will update suite.currentSpecReport if failures occur in these blocks + suite.reportEach(spec, types.NodeTypeReportBeforeEach) + if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { + //the reportEach failed, skip this spec + skip = true + } + + suite.currentSpecReport.StartTime = time.Now() + maxAttempts := max(1, spec.FlakeAttempts()) + if suite.config.FlakeAttempts > 0 { + maxAttempts = suite.config.FlakeAttempts + } + + for attempt := 0; !skip && (attempt < maxAttempts); attempt++ { + suite.currentSpecReport.NumAttempts = attempt + 1 + suite.writer.Truncate() + suite.outputInterceptor.StartInterceptingOutput() + if attempt > 0 { + fmt.Fprintf(suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt) + } + isFinalAttempt := (attempt == maxAttempts-1) + + interruptStatus := suite.interruptHandler.Status() + deepestNestingLevelAttained := -1 + var nodes = spec.Nodes.WithType(types.NodeTypeBeforeAll).Filter(func(n Node) bool { + return nodeState[n.ID] != types.SpecStatePassed + }) + nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel() + nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...) + nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeIt)...) + + var terminatingNode Node + for j := range nodes { + deepestNestingLevelAttained = max(deepestNestingLevelAttained, nodes[j].NestingLevel) + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(nodes[j], interruptStatus.Channel, spec.Nodes.BestTextFor(nodes[j])) + suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime) + nodeState[nodes[j].ID] = suite.currentSpecReport.State + if suite.currentSpecReport.State != types.SpecStatePassed { + terminatingNode = nodes[j] + break + } + } + + afterAllNodesThatRan := map[uint]bool{} + // pull out some shared code so we aren't repeating ourselves down below. 
this just runs after and cleanup nodes + runAfterAndCleanupNodes := func(nodes Nodes) { + for j := range nodes { + state, failure := suite.runNode(nodes[j], suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(nodes[j])) + suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime) + nodeState[nodes[j].ID] = state + if suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted { + suite.currentSpecReport.State = state + suite.currentSpecReport.Failure = failure + if state != types.SpecStatePassed { + terminatingNode = nodes[j] + } + } + if nodes[j].NodeType.Is(types.NodeTypeAfterAll) { + afterAllNodesThatRan[nodes[j].ID] = true + } + } + } + + // pull out a helper that captures the logic of whether or not we should run a given After node. + // there is complexity here stemming from the fact that we allow nested ordered contexts and flakey retries + shouldRunAfterNode := func(n Node) bool { + if n.NodeType.Is(types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) { + return true + } + var id uint + if n.NodeType.Is(types.NodeTypeAfterAll) { + id = n.ID + if afterAllNodesThatRan[id] { //we've already run on this attempt. don't run again. + return false + } + } + if n.NodeType.Is(types.NodeTypeCleanupAfterAll) { + id = n.NodeIDWhereCleanupWasGenerated + } + isLastSpecWithNode := indexOfLastSpecContainingNodeID(id) == i + + switch suite.currentSpecReport.State { + case types.SpecStatePassed: //we've passed so far... + return isLastSpecWithNode //... and we're the last spec with this AfterNode, so we should run it + case types.SpecStateSkipped: //the spec was skipped by the user... + if isLastSpecWithNode { + return true //...we're the last spec, so we should run the AfterNode + } + if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) && terminatingNode.NestingLevel == n.NestingLevel { + return true //...or, a BeforeAll was skipped and it's at our nesting level, so our subgroup is going to skip + } + case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed... + if isFinalAttempt { + return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run + } + if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) { + //...we'll be rerunning a BeforeAll so we should cleanup after it if... + if n.NodeType.Is(types.NodeTypeAfterAll) && terminatingNode.NestingLevel == n.NestingLevel { + return true //we're at the same nesting level + } + if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated { + return true //we're a DeferCleanup generated by it + } + } + if terminatingNode.NodeType.Is(types.NodeTypeAfterAll) { + //...we'll be rerunning an AfterAll so we should cleanup after it if... + if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated { + return true //we're a DeferCleanup generated by it + } + } + case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted + return true //...that means the test run is over and we should clean up the stack. Run the AfterNode + } + return false + } + + // first pass - run all the JustAfterEach, AfterEach, and AfterAlls. Our shouldRunAfterNode filter function will clean up the AfterAlls for us.
+ afterNodes := spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel() + afterNodes = afterNodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterEach).CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()...) + afterNodes = afterNodes.WithinNestingLevel(deepestNestingLevelAttained) + afterNodes = afterNodes.Filter(shouldRunAfterNode) + runAfterAndCleanupNodes(afterNodes) + + // second-pass perhaps we didn't run the AfterAlls but a state change due to an AfterEach now requires us to run the AfterAlls: + afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode) + runAfterAndCleanupNodes(afterNodes) + + // now we run any DeferCleanups + afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse() + afterNodes = append(afterNodes, suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Filter(shouldRunAfterNode).Reverse()...) + runAfterAndCleanupNodes(afterNodes) + + // third-pass, perhaps a DeferCleanup failed and now we need to run the AfterAlls. + afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode) + runAfterAndCleanupNodes(afterNodes) + + // and finally - running AfterAlls may have generated some new DeferCleanup nodes, let's run them to finish up + afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse().Filter(shouldRunAfterNode) + runAfterAndCleanupNodes(afterNodes) + + suite.currentSpecReport.EndTime = time.Now() + suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) + suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes()) + suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() + + if suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) { + break + } + } + + //send the spec report to any attached ReportAfterEach blocks - this will update suite.currentSpecReport if failures occur in these blocks + suite.reportEach(spec, types.NodeTypeReportAfterEach) + suite.processCurrentSpecReport() + if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { + groupSucceeded = false + } + suite.currentSpecReport = types.SpecReport{} + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go new file mode 100644 index 00000000..aca7d1c4 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go @@ -0,0 +1,212 @@ +package interrupt_handler + +import ( + "fmt" + "os" + "os/signal" + "runtime" + "sync" + "syscall" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/internal/parallel_support" +) + +const TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION = 30 * time.Second +const TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT = 10 +const ABORT_POLLING_INTERVAL = 500 * time.Millisecond +const ABORT_REPEAT_INTERRUPT_DURATION = 30 * time.Second + +type InterruptCause uint + +const ( + InterruptCauseInvalid InterruptCause = iota + + InterruptCauseSignal + InterruptCauseTimeout + InterruptCauseAbortByOtherProcess +) + +func (ic InterruptCause) String() string { + switch ic { + case InterruptCauseSignal: + return "Interrupted by User" + 
case InterruptCauseTimeout: + return "Interrupted by Timeout" + case InterruptCauseAbortByOtherProcess: + return "Interrupted by Other Ginkgo Process" + } + return "INVALID_INTERRUPT_CAUSE" +} + +type InterruptStatus struct { + Interrupted bool + Channel chan interface{} + Cause InterruptCause +} + +type InterruptHandlerInterface interface { + Status() InterruptStatus + SetInterruptPlaceholderMessage(string) + ClearInterruptPlaceholderMessage() + InterruptMessageWithStackTraces() string +} + +type InterruptHandler struct { + c chan interface{} + lock *sync.Mutex + interrupted bool + interruptPlaceholderMessage string + interruptCause InterruptCause + client parallel_support.Client + stop chan interface{} +} + +func NewInterruptHandler(timeout time.Duration, client parallel_support.Client) *InterruptHandler { + handler := &InterruptHandler{ + c: make(chan interface{}), + lock: &sync.Mutex{}, + interrupted: false, + stop: make(chan interface{}), + client: client, + } + handler.registerForInterrupts(timeout) + return handler +} + +func (handler *InterruptHandler) Stop() { + close(handler.stop) +} + +func (handler *InterruptHandler) registerForInterrupts(timeout time.Duration) { + // os signal handling + signalChannel := make(chan os.Signal, 1) + signal.Notify(signalChannel, os.Interrupt, syscall.SIGTERM) + + // timeout handling + var timeoutChannel <-chan time.Time + var timeoutTimer *time.Timer + if timeout > 0 { + timeoutTimer = time.NewTimer(timeout) + timeoutChannel = timeoutTimer.C + } + + // cross-process abort handling + var abortChannel chan bool + if handler.client != nil { + abortChannel = make(chan bool) + go func() { + pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL) + for { + select { + case <-pollTicker.C: + if handler.client.ShouldAbort() { + abortChannel <- true + pollTicker.Stop() + return + } + case <-handler.stop: + pollTicker.Stop() + return + } + } + }() + } + + // listen for any interrupt signals + // note that some (timeouts, cross-process aborts) will only trigger once + // for these we set up a ticker to keep interrupting the suite until it ends + // this ensures any `AfterEach` or `AfterSuite`s that get stuck cleaning up + // get interrupted eventually + go func() { + var interruptCause InterruptCause + var repeatChannel <-chan time.Time + var repeatTicker *time.Ticker + for { + select { + case <-signalChannel: + interruptCause = InterruptCauseSignal + case <-timeoutChannel: + interruptCause = InterruptCauseTimeout + repeatInterruptTimeout := timeout / time.Duration(TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT) + if repeatInterruptTimeout > TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION { + repeatInterruptTimeout = TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION + } + timeoutTimer.Stop() + repeatTicker = time.NewTicker(repeatInterruptTimeout) + repeatChannel = repeatTicker.C + case <-abortChannel: + interruptCause = InterruptCauseAbortByOtherProcess + repeatTicker = time.NewTicker(ABORT_REPEAT_INTERRUPT_DURATION) + repeatChannel = repeatTicker.C + case <-repeatChannel: + //do nothing, just interrupt again using the same interruptCause + case <-handler.stop: + if timeoutTimer != nil { + timeoutTimer.Stop() + } + if repeatTicker != nil { + repeatTicker.Stop() + } + signal.Stop(signalChannel) + return + } + handler.lock.Lock() + handler.interruptCause = interruptCause + if handler.interruptPlaceholderMessage != "" { + fmt.Println(handler.interruptPlaceholderMessage) + } + handler.interrupted = true + close(handler.c) + handler.c = make(chan interface{}) + handler.lock.Unlock() 
+ } + }() +} + +func (handler *InterruptHandler) Status() InterruptStatus { + handler.lock.Lock() + defer handler.lock.Unlock() + + return InterruptStatus{ + Interrupted: handler.interrupted, + Channel: handler.c, + Cause: handler.interruptCause, + } +} + +func (handler *InterruptHandler) SetInterruptPlaceholderMessage(message string) { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.interruptPlaceholderMessage = message +} + +func (handler *InterruptHandler) ClearInterruptPlaceholderMessage() { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.interruptPlaceholderMessage = "" +} + +func (handler *InterruptHandler) InterruptMessageWithStackTraces() string { + handler.lock.Lock() + out := fmt.Sprintf("%s\n\n", handler.interruptCause.String()) + defer handler.lock.Unlock() + if handler.interruptCause == InterruptCauseAbortByOtherProcess { + return out + } + out += "Here's a stack trace of all running goroutines:\n" + buf := make([]byte, 8192) + for { + n := runtime.Stack(buf, true) + if n < len(buf) { + buf = buf[:n] + break + } + buf = make([]byte, 2*len(buf)) + } + out += formatter.Fi(1, "%s", string(buf)) + return out +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go new file mode 100644 index 00000000..bf0de496 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go @@ -0,0 +1,15 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris +// +build freebsd openbsd netbsd dragonfly darwin linux solaris + +package interrupt_handler + +import ( + "os" + "os/signal" + "syscall" +) + +func SwallowSigQuit() { + c := make(chan os.Signal, 1024) + signal.Notify(c, syscall.SIGQUIT) +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go new file mode 100644 index 00000000..fcf8da83 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go @@ -0,0 +1,8 @@ +//go:build windows +// +build windows + +package interrupt_handler + +func SwallowSigQuit() { + //noop +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/node.go new file mode 100644 index 00000000..289e4dde --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -0,0 +1,660 @@ +package internal + +import ( + "fmt" + "reflect" + "sort" + + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +var _global_node_id_counter = uint(0) +var _global_id_mutex = &sync.Mutex{} + +func UniqueNodeID() uint { + //There's a reace in the internal integration tests if we don't make + //accessing _global_node_id_counter safe across goroutines. 
+ _global_id_mutex.Lock() + defer _global_id_mutex.Unlock() + _global_node_id_counter += 1 + return _global_node_id_counter +} + +type Node struct { + ID uint + NodeType types.NodeType + + Text string + Body func() + CodeLocation types.CodeLocation + NestingLevel int + + SynchronizedBeforeSuiteProc1Body func() []byte + SynchronizedBeforeSuiteAllProcsBody func([]byte) + + SynchronizedAfterSuiteAllProcsBody func() + SynchronizedAfterSuiteProc1Body func() + + ReportEachBody func(types.SpecReport) + ReportAfterSuiteBody func(types.Report) + + MarkedFocus bool + MarkedPending bool + MarkedSerial bool + MarkedOrdered bool + MarkedOncePerOrdered bool + FlakeAttempts int + Labels Labels + + NodeIDWhereCleanupWasGenerated uint +} + +// Decoration Types +type focusType bool +type pendingType bool +type serialType bool +type orderedType bool +type honorsOrderedType bool + +const Focus = focusType(true) +const Pending = pendingType(true) +const Serial = serialType(true) +const Ordered = orderedType(true) +const OncePerOrdered = honorsOrderedType(true) + +type FlakeAttempts uint +type Offset uint +type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing +type Labels []string + +func UnionOfLabels(labels ...Labels) Labels { + out := Labels{} + seen := map[string]bool{} + for _, labelSet := range labels { + for _, label := range labelSet { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + } + return out +} + +func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) { + decorations := []interface{}{} + remainingArgs := []interface{}{} + for _, arg := range args { + if isDecoration(arg) { + decorations = append(decorations, arg) + } else { + remainingArgs = append(remainingArgs, arg) + } + } + return decorations, remainingArgs +} + +func isDecoration(arg interface{}) bool { + switch t := reflect.TypeOf(arg); { + case t == nil: + return false + case t == reflect.TypeOf(Offset(0)): + return true + case t == reflect.TypeOf(types.CodeLocation{}): + return true + case t == reflect.TypeOf(Focus): + return true + case t == reflect.TypeOf(Pending): + return true + case t == reflect.TypeOf(Serial): + return true + case t == reflect.TypeOf(Ordered): + return true + case t == reflect.TypeOf(OncePerOrdered): + return true + case t == reflect.TypeOf(FlakeAttempts(0)): + return true + case t == reflect.TypeOf(Labels{}): + return true + case t.Kind() == reflect.Slice && isSliceOfDecorations(arg): + return true + default: + return false + } +} + +func isSliceOfDecorations(slice interface{}) bool { + vSlice := reflect.ValueOf(slice) + if vSlice.Len() == 0 { + return false + } + for i := 0; i < vSlice.Len(); i++ { + if !isDecoration(vSlice.Index(i).Interface()) { + return false + } + } + return true +} + +func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) { + baseOffset := 2 + node := Node{ + ID: UniqueNodeID(), + NodeType: nodeType, + Text: text, + Labels: Labels{}, + CodeLocation: types.NewCodeLocation(baseOffset), + NestingLevel: -1, + } + errors := []error{} + appendError := func(err error) { + if err != nil { + errors = append(errors, err) + } + } + + args = unrollInterfaceSlice(args) + + remainingArgs := []interface{}{} + //First get the CodeLocation up-to-date + for _, arg := range args { + switch v := arg.(type) { + case Offset: + node.CodeLocation = types.NewCodeLocation(baseOffset + int(v)) + case types.CodeLocation: + node.CodeLocation = v + default: + 
remainingArgs = append(remainingArgs, arg) + } + } + + labelsSeen := map[string]bool{} + trackedFunctionError := false + args = remainingArgs + remainingArgs = []interface{}{} + //now process the rest of the args + for _, arg := range args { + + switch t := reflect.TypeOf(arg); { + case t == reflect.TypeOf(float64(0)): + break //ignore deprecated timeouts + case t == reflect.TypeOf(Focus): + node.MarkedFocus = bool(arg.(focusType)) + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Focus")) + } + case t == reflect.TypeOf(Pending): + node.MarkedPending = bool(arg.(pendingType)) + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Pending")) + } + case t == reflect.TypeOf(Serial): + node.MarkedSerial = bool(arg.(serialType)) + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Serial")) + } + case t == reflect.TypeOf(Ordered): + node.MarkedOrdered = bool(arg.(orderedType)) + if !nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered")) + } + case t == reflect.TypeOf(OncePerOrdered): + node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType)) + if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered")) + } + case t == reflect.TypeOf(FlakeAttempts(0)): + node.FlakeAttempts = int(arg.(FlakeAttempts)) + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "FlakeAttempts")) + } + case t == reflect.TypeOf(Labels{}): + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label")) + } + for _, label := range arg.(Labels) { + if !labelsSeen[label] { + labelsSeen[label] = true + label, err := types.ValidateAndCleanupLabel(label, node.CodeLocation) + node.Labels = append(node.Labels, label) + appendError(err) + } + } + case t.Kind() == reflect.Func: + if node.Body != nil { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + isValid := (t.NumOut() == 0) && (t.NumIn() <= 1) && (t.NumIn() == 0 || t.In(0) == reflect.TypeOf(make(Done))) + if !isValid { + appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + if t.NumIn() == 0 { + node.Body = arg.(func()) + } else { + deprecationTracker.TrackDeprecation(types.Deprecations.Async(), node.CodeLocation) + deprecatedAsyncBody := arg.(func(Done)) + node.Body = func() { deprecatedAsyncBody(make(Done)) } + } + default: + remainingArgs = append(remainingArgs, arg) + } + } + + //validations + if node.MarkedPending && node.MarkedFocus { + appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType)) + } + + if node.Body == nil && !node.MarkedPending && !trackedFunctionError { + appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) + } + for _, arg := range remainingArgs { + appendError(types.GinkgoErrors.UnknownDecorator(node.CodeLocation, nodeType, arg)) 
+ } + + if len(errors) > 0 { + return Node{}, errors + } + + return node, errors +} + +func NewSynchronizedBeforeSuiteNode(proc1Body func() []byte, allProcsBody func([]byte), codeLocation types.CodeLocation) (Node, []error) { + return Node{ + ID: UniqueNodeID(), + NodeType: types.NodeTypeSynchronizedBeforeSuite, + SynchronizedBeforeSuiteProc1Body: proc1Body, + SynchronizedBeforeSuiteAllProcsBody: allProcsBody, + CodeLocation: codeLocation, + }, nil +} + +func NewSynchronizedAfterSuiteNode(allProcsBody func(), proc1Body func(), codeLocation types.CodeLocation) (Node, []error) { + return Node{ + ID: UniqueNodeID(), + NodeType: types.NodeTypeSynchronizedAfterSuite, + SynchronizedAfterSuiteAllProcsBody: allProcsBody, + SynchronizedAfterSuiteProc1Body: proc1Body, + CodeLocation: codeLocation, + }, nil +} + +func NewReportBeforeEachNode(body func(types.SpecReport), codeLocation types.CodeLocation) (Node, []error) { + return Node{ + ID: UniqueNodeID(), + NodeType: types.NodeTypeReportBeforeEach, + ReportEachBody: body, + CodeLocation: codeLocation, + NestingLevel: -1, + }, nil +} + +func NewReportAfterEachNode(body func(types.SpecReport), codeLocation types.CodeLocation) (Node, []error) { + return Node{ + ID: UniqueNodeID(), + NodeType: types.NodeTypeReportAfterEach, + ReportEachBody: body, + CodeLocation: codeLocation, + NestingLevel: -1, + }, nil +} + +func NewReportAfterSuiteNode(text string, body func(types.Report), codeLocation types.CodeLocation) (Node, []error) { + return Node{ + ID: UniqueNodeID(), + Text: text, + NodeType: types.NodeTypeReportAfterSuite, + ReportAfterSuiteBody: body, + CodeLocation: codeLocation, + }, nil +} + +func NewCleanupNode(fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) { + baseOffset := 2 + node := Node{ + ID: UniqueNodeID(), + NodeType: types.NodeTypeCleanupInvalid, + CodeLocation: types.NewCodeLocation(baseOffset), + NestingLevel: -1, + } + remainingArgs := []interface{}{} + for _, arg := range args { + switch t := reflect.TypeOf(arg); { + case t == reflect.TypeOf(Offset(0)): + node.CodeLocation = types.NewCodeLocation(baseOffset + int(arg.(Offset))) + case t == reflect.TypeOf(types.CodeLocation{}): + node.CodeLocation = arg.(types.CodeLocation) + default: + remainingArgs = append(remainingArgs, arg) + } + } + + if len(remainingArgs) == 0 { + return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(node.CodeLocation)} + } + callback := reflect.ValueOf(remainingArgs[0]) + if !(callback.Kind() == reflect.Func && callback.Type().NumOut() <= 1) { + return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(node.CodeLocation)} + } + callArgs := []reflect.Value{} + for _, arg := range remainingArgs[1:] { + callArgs = append(callArgs, reflect.ValueOf(arg)) + } + cl := node.CodeLocation + node.Body = func() { + out := callback.Call(callArgs) + if len(out) == 1 && !out[0].IsNil() { + fail(fmt.Sprintf("DeferCleanup callback returned error: %v", out[0]), cl) + } + } + + return node, nil +} + +func (n Node) IsZero() bool { + return n.ID == 0 +} + +/* Nodes */ +type Nodes []Node + +func (n Nodes) CopyAppend(nodes ...Node) Nodes { + numN := len(n) + out := make(Nodes, numN+len(nodes)) + for i, node := range n { + out[i] = node + } + for j, node := range nodes { + out[numN+j] = node + } + return out +} + +func (n Nodes) SplitAround(pivot Node) (Nodes, Nodes) { + pivotIdx := len(n) + for i := range n { + if n[i].ID == pivot.ID { + pivotIdx = i + break + } + } + left := n[:pivotIdx] + right := Nodes{} + if pivotIdx+1 < len(n) 
{ + right = n[pivotIdx+1:] + } + + return left, right +} + +func (n Nodes) FirstNodeWithType(nodeTypes types.NodeType) Node { + for i := range n { + if n[i].NodeType.Is(nodeTypes) { + return n[i] + } + } + return Node{} +} + +func (n Nodes) WithType(nodeTypes types.NodeType) Nodes { + count := 0 + for i := range n { + if n[i].NodeType.Is(nodeTypes) { + count++ + } + } + + out, j := make(Nodes, count), 0 + for i := range n { + if n[i].NodeType.Is(nodeTypes) { + out[j] = n[i] + j++ + } + } + return out +} + +func (n Nodes) WithoutType(nodeTypes types.NodeType) Nodes { + count := 0 + for i := range n { + if !n[i].NodeType.Is(nodeTypes) { + count++ + } + } + + out, j := make(Nodes, count), 0 + for i := range n { + if !n[i].NodeType.Is(nodeTypes) { + out[j] = n[i] + j++ + } + } + return out +} + +func (n Nodes) WithoutNode(nodeToExclude Node) Nodes { + idxToExclude := len(n) + for i := range n { + if n[i].ID == nodeToExclude.ID { + idxToExclude = i + break + } + } + if idxToExclude == len(n) { + return n + } + out, j := make(Nodes, len(n)-1), 0 + for i := range n { + if i == idxToExclude { + continue + } + out[j] = n[i] + j++ + } + return out +} + +func (n Nodes) Filter(filter func(Node) bool) Nodes { + trufa, count := make([]bool, len(n)), 0 + for i := range n { + if filter(n[i]) { + trufa[i] = true + count += 1 + } + } + out, j := make(Nodes, count), 0 + for i := range n { + if trufa[i] { + out[j] = n[i] + j++ + } + } + return out +} + +func (n Nodes) FirstSatisfying(filter func(Node) bool) Node { + for i := range n { + if filter(n[i]) { + return n[i] + } + } + return Node{} +} + +func (n Nodes) WithinNestingLevel(deepestNestingLevel int) Nodes { + count := 0 + for i := range n { + if n[i].NestingLevel <= deepestNestingLevel { + count++ + } + } + out, j := make(Nodes, count), 0 + for i := range n { + if n[i].NestingLevel <= deepestNestingLevel { + out[j] = n[i] + j++ + } + } + return out +} + +func (n Nodes) SortedByDescendingNestingLevel() Nodes { + out := make(Nodes, len(n)) + copy(out, n) + sort.SliceStable(out, func(i int, j int) bool { + return out[i].NestingLevel > out[j].NestingLevel + }) + + return out +} + +func (n Nodes) SortedByAscendingNestingLevel() Nodes { + out := make(Nodes, len(n)) + copy(out, n) + sort.SliceStable(out, func(i int, j int) bool { + return out[i].NestingLevel < out[j].NestingLevel + }) + + return out +} + +func (n Nodes) FirstWithNestingLevel(level int) Node { + for i := range n { + if n[i].NestingLevel == level { + return n[i] + } + } + return Node{} +} + +func (n Nodes) Reverse() Nodes { + out := make(Nodes, len(n)) + for i := range n { + out[len(n)-1-i] = n[i] + } + return out +} + +func (n Nodes) Texts() []string { + out := make([]string, len(n)) + for i := range n { + out[i] = n[i].Text + } + return out +} + +func (n Nodes) Labels() [][]string { + out := make([][]string, len(n)) + for i := range n { + if n[i].Labels == nil { + out[i] = []string{} + } else { + out[i] = []string(n[i].Labels) + } + } + return out +} + +func (n Nodes) UnionOfLabels() []string { + out := []string{} + seen := map[string]bool{} + for i := range n { + for _, label := range n[i].Labels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + } + return out +} + +func (n Nodes) CodeLocations() []types.CodeLocation { + out := make([]types.CodeLocation, len(n)) + for i := range n { + out[i] = n[i].CodeLocation + } + return out +} + +func (n Nodes) BestTextFor(node Node) string { + if node.Text != "" { + return node.Text + } + parentNestingLevel := 
node.NestingLevel - 1 + for i := range n { + if n[i].Text != "" && n[i].NestingLevel == parentNestingLevel { + return n[i].Text + } + } + + return "" +} + +func (n Nodes) ContainsNodeID(id uint) bool { + for i := range n { + if n[i].ID == id { + return true + } + } + return false +} + +func (n Nodes) HasNodeMarkedPending() bool { + for i := range n { + if n[i].MarkedPending { + return true + } + } + return false +} + +func (n Nodes) HasNodeMarkedFocus() bool { + for i := range n { + if n[i].MarkedFocus { + return true + } + } + return false +} + +func (n Nodes) HasNodeMarkedSerial() bool { + for i := range n { + if n[i].MarkedSerial { + return true + } + } + return false +} + +func (n Nodes) FirstNodeMarkedOrdered() Node { + for i := range n { + if n[i].MarkedOrdered { + return n[i] + } + } + return Node{} +} + +func unrollInterfaceSlice(args interface{}) []interface{} { + v := reflect.ValueOf(args) + if v.Kind() != reflect.Slice { + return []interface{}{args} + } + out := []interface{}{} + for i := 0; i < v.Len(); i++ { + el := reflect.ValueOf(v.Index(i).Interface()) + if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) { + out = append(out, unrollInterfaceSlice(el.Interface())...) + } else { + out = append(out, v.Index(i).Interface()) + } + } + return out +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go new file mode 100644 index 00000000..161be820 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go @@ -0,0 +1,121 @@ +package internal + +import ( + "math/rand" + "sort" + + "github.com/onsi/ginkgo/v2/types" +) + +type GroupedSpecIndices []SpecIndices +type SpecIndices []int + +func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, GroupedSpecIndices) { + /* + Ginkgo has sophisticated support for randomizing specs. Specs are guaranteed to have the same + order for a given seed across test runs. + + By default only top-level containers and specs are shuffled - this makes for a more intuitive debugging + experience - specs within a given container run in the order they appear in the file. + + Developers can set -randomizeAllSpecs to shuffle _all_ specs. + + In addition, spec containers can be marked as Ordered. Specs within an Ordered container are never shuffled. + + Finally, specs and spec containers can be marked as Serial. When running in parallel, serial specs run on Process #1 _after_ all other processes have finished. + */ + + // Seed a new random source based on thee configured random seed. + r := rand.New(rand.NewSource(suiteConfig.RandomSeed)) + + // first break things into execution groups + // a group represents a single unit of execution and is a collection of SpecIndices + // usually a group is just a single spec, however ordered containers must be preserved as a single group + executionGroupIDs := []uint{} + executionGroups := map[uint]SpecIndices{} + for idx, spec := range specs { + groupNode := spec.Nodes.FirstNodeMarkedOrdered() + if groupNode.IsZero() { + groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt) + } + executionGroups[groupNode.ID] = append(executionGroups[groupNode.ID], idx) + if len(executionGroups[groupNode.ID]) == 1 { + executionGroupIDs = append(executionGroupIDs, groupNode.ID) + } + } + + // now, we only shuffle all the execution groups if we're randomizing all specs, otherwise + // we shuffle outermost containers. 
so we need to form shufflable groupings of GroupIDs + shufflableGroupingIDs := []uint{} + shufflableGroupingIDToGroupIDs := map[uint][]uint{} + shufflableGroupingsIDToSortKeys := map[uint]string{} + + // for each execution group we're going to have to pick a node to represent how the + // execution group is grouped for shuffling: + nodeTypesToShuffle := types.NodeTypesForContainerAndIt + if suiteConfig.RandomizeAllSpecs { + nodeTypesToShuffle = types.NodeTypeIt + } + + //so, fo reach execution group: + for _, groupID := range executionGroupIDs { + // pick out a representative spec + representativeSpec := specs[executionGroups[groupID][0]] + + // and grab the node on the spec that will represent which shufflable group this execution group belongs tu + shufflableGroupingNode := representativeSpec.Nodes.FirstNodeWithType(nodeTypesToShuffle) + + //add the execution group to its shufflable group + shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID] = append(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID], groupID) + + //and if it's the first one in + if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 { + // record the shuffleable group ID + shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID) + // and record the sort key to use + shufflableGroupingsIDToSortKeys[shufflableGroupingNode.ID] = shufflableGroupingNode.CodeLocation.String() + } + } + + // now we sort the shufflable groups by the sort key. We use the shufflable group nodes code location and break ties using its node id + sort.SliceStable(shufflableGroupingIDs, func(i, j int) bool { + keyA := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[i]] + keyB := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[j]] + if keyA == keyB { + return shufflableGroupingIDs[i] < shufflableGroupingIDs[j] + } else { + return keyA < keyB + } + }) + + // now we permute the sorted shufflable grouping IDs and build the ordered Groups + orderedGroups := GroupedSpecIndices{} + permutation := r.Perm(len(shufflableGroupingIDs)) + for _, j := range permutation { + //let's get the execution group IDs for this shufflable group: + executionGroupIDsForJ := shufflableGroupingIDToGroupIDs[shufflableGroupingIDs[j]] + // and we'll add their associated specindices to the orderedGroups slice: + for _, executionGroupID := range executionGroupIDsForJ { + orderedGroups = append(orderedGroups, executionGroups[executionGroupID]) + } + } + + // If we're running in series, we're done. + if suiteConfig.ParallelTotal == 1 { + return orderedGroups, GroupedSpecIndices{} + } + + // We're running in parallel so we need to partition the ordered groups into a parallelizable set and a serialized set. + // The parallelizable groups will run across all Ginkgo processes... + // ...the serial groups will only run on Process #1 after all other processes have exited. 
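As an illustration of the seeded, deterministic shuffle described in the comments above (a standalone sketch, separate from the vendored file; the group IDs and sort keys below are made up for the example), a stable sort key plus rand.New(rand.NewSource(seed)).Perm produces the same ordering on every run for a given seed:

package main

import (
	"fmt"
	"math/rand"
	"sort"
)

func main() {
	// Hypothetical shufflable grouping IDs with code-location-like sort keys.
	ids := []uint{7, 3, 9}
	sortKeys := map[uint]string{7: "spec_a.go:10", 3: "spec_b.go:22", 9: "spec_a.go:50"}

	// Sort deterministically by key, breaking ties by ID, so the permutation
	// below is always applied to the same base ordering.
	sort.SliceStable(ids, func(i, j int) bool {
		if sortKeys[ids[i]] == sortKeys[ids[j]] {
			return ids[i] < ids[j]
		}
		return sortKeys[ids[i]] < sortKeys[ids[j]]
	})

	// The same seed always yields the same permutation across runs.
	r := rand.New(rand.NewSource(17))
	for _, j := range r.Perm(len(ids)) {
		fmt.Println(ids[j])
	}
}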
+ parallelizableGroups, serialGroups := GroupedSpecIndices{}, GroupedSpecIndices{} + for _, specIndices := range orderedGroups { + if specs[specIndices[0]].Nodes.HasNodeMarkedSerial() { + serialGroups = append(serialGroups, specIndices) + } else { + parallelizableGroups = append(parallelizableGroups, specIndices) + } + } + + return parallelizableGroups, serialGroups +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go new file mode 100644 index 00000000..b59918a8 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go @@ -0,0 +1,250 @@ +package internal + +import ( + "bytes" + "io" + "os" + "time" +) + +const BAILOUT_TIME = 1 * time.Second +const BAILOUT_MESSAGE = `Ginkgo detected an issue while intercepting output. + +When running in parallel, Ginkgo captures stdout and stderr output +and attaches it to the running spec. It looks like that process is getting +stuck for this suite. + +This usually happens if you, or a library you are using, spin up an external +process and set cmd.Stdout = os.Stdout and/or cmd.Stderr = os.Stderr. This +causes the external process to keep Ginkgo's output interceptor pipe open and +causes output interception to hang. + +Ginkgo has detected this and shortcircuited the capture process. The specs +will continue running after this message however output from the external +process that caused this issue will not be captured. + +You have several options to fix this. In preferred order they are: + +1. Pass GinkgoWriter instead of os.Stdout or os.Stderr to your process. +2. Ensure your process exits before the current spec completes. If your +process is long-lived and must cross spec boundaries, this option won't +work for you. +3. Pause Ginkgo's output interceptor before starting your process and then +resume it after. Use PauseOutputInterception() and ResumeOutputInterception() +to do this. +4. Set --output-interceptor-mode=none when running your Ginkgo suite. This will +turn off all output interception but allow specs to run in parallel without this +issue. You may miss important output if you do this including output from Go's +race detector. + +More details on issue #851 - https://github.com/onsi/ginkgo/issues/851 +` + +/* +The OutputInterceptor is used by to +intercept and capture all stdin and stderr output during a test run. +*/ +type OutputInterceptor interface { + StartInterceptingOutput() + StartInterceptingOutputAndForwardTo(io.Writer) + StopInterceptingAndReturnOutput() string + + PauseIntercepting() + ResumeIntercepting() + + Shutdown() +} + +type NoopOutputInterceptor struct{} + +func (interceptor NoopOutputInterceptor) StartInterceptingOutput() {} +func (interceptor NoopOutputInterceptor) StartInterceptingOutputAndForwardTo(io.Writer) {} +func (interceptor NoopOutputInterceptor) StopInterceptingAndReturnOutput() string { return "" } +func (interceptor NoopOutputInterceptor) PauseIntercepting() {} +func (interceptor NoopOutputInterceptor) ResumeIntercepting() {} +func (interceptor NoopOutputInterceptor) Shutdown() {} + +type pipePair struct { + reader *os.File + writer *os.File +} + +func startPipeFactory(pipeChannel chan pipePair, shutdown chan interface{}) { + for { + //make the next pipe... 
+ pair := pipePair{} + pair.reader, pair.writer, _ = os.Pipe() + select { + //...and provide it to the next consumer (they are responsible for closing the files) + case pipeChannel <- pair: + continue + //...or close the files if we were told to shutdown + case <-shutdown: + pair.reader.Close() + pair.writer.Close() + return + } + } +} + +type interceptorImplementation interface { + CreateStdoutStderrClones() (*os.File, *os.File) + ConnectPipeToStdoutStderr(*os.File) + RestoreStdoutStderrFromClones(*os.File, *os.File) + ShutdownClones(*os.File, *os.File) +} + +type genericOutputInterceptor struct { + intercepting bool + + stdoutClone *os.File + stderrClone *os.File + pipe pipePair + + shutdown chan interface{} + emergencyBailout chan interface{} + pipeChannel chan pipePair + interceptedContent chan string + + forwardTo io.Writer + accumulatedOutput string + + implementation interceptorImplementation +} + +func (interceptor *genericOutputInterceptor) StartInterceptingOutput() { + interceptor.StartInterceptingOutputAndForwardTo(io.Discard) +} + +func (interceptor *genericOutputInterceptor) StartInterceptingOutputAndForwardTo(w io.Writer) { + if interceptor.intercepting { + return + } + interceptor.accumulatedOutput = "" + interceptor.forwardTo = w + interceptor.ResumeIntercepting() +} + +func (interceptor *genericOutputInterceptor) StopInterceptingAndReturnOutput() string { + if interceptor.intercepting { + interceptor.PauseIntercepting() + } + return interceptor.accumulatedOutput +} + +func (interceptor *genericOutputInterceptor) ResumeIntercepting() { + if interceptor.intercepting { + return + } + interceptor.intercepting = true + if interceptor.stdoutClone == nil { + interceptor.stdoutClone, interceptor.stderrClone = interceptor.implementation.CreateStdoutStderrClones() + interceptor.shutdown = make(chan interface{}) + go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown) + } + + // Now we make a pipe, we'll use this to redirect the input to the 1 and 2 file descriptors (this is how everything else in the world is tring to log to stdout and stderr) + // we get the pipe from our pipe factory. it runs in the background so we can request the next pipe while the spec being intercepted is running + interceptor.pipe = <-interceptor.pipeChannel + + interceptor.emergencyBailout = make(chan interface{}) + + //Spin up a goroutine to copy data from the pipe into a buffer, this is how we capture any output the user is emitting + go func() { + buffer := &bytes.Buffer{} + destination := io.MultiWriter(buffer, interceptor.forwardTo) + copyFinished := make(chan interface{}) + reader := interceptor.pipe.reader + go func() { + io.Copy(destination, reader) + reader.Close() // close the read end of the pipe so we don't leak a file descriptor + close(copyFinished) + }() + select { + case <-copyFinished: + interceptor.interceptedContent <- buffer.String() + case <-interceptor.emergencyBailout: + interceptor.interceptedContent <- "" + } + }() + + interceptor.implementation.ConnectPipeToStdoutStderr(interceptor.pipe.writer) +} + +func (interceptor *genericOutputInterceptor) PauseIntercepting() { + if !interceptor.intercepting { + return + } + // first we have to close the write end of the pipe. To do this we have to close all file descriptors pointing + // to the write end. So that would be the pipewriter itself, and FD #1 and FD #2 if we've Dup2'd them + interceptor.pipe.writer.Close() // the pipewriter itself + + // we also need to stop intercepting. 
we do that by reconnecting the stdout and stderr file descriptions back to their respective #1 and #2 file descriptors; + // this also closes #1 and #2 before it points that their original stdout and stderr file descriptions + interceptor.implementation.RestoreStdoutStderrFromClones(interceptor.stdoutClone, interceptor.stderrClone) + + var content string + select { + case content = <-interceptor.interceptedContent: + case <-time.After(BAILOUT_TIME): + /* + By closing all the pipe writer's file descriptors associated with the pipe writer's file description the io.Copy reading from the reader + should eventually receive an EOF and exit. + + **However**, if the user has spun up an external process and passed in os.Stdout/os.Stderr to cmd.Stdout/cmd.Stderr then the external process + will have a file descriptor pointing to the pipe writer's file description and it will not close until the external process exits. + + That would leave us hanging here waiting for the io.Copy to close forever. Instead we invoke this emergency escape valve. This returns whatever + content we've got but leaves the io.Copy running. This ensures the external process can continue writing without hanging at the cost of leaking a goroutine + and file descriptor (those these will be cleaned up when the process exits). + + We tack on a message to notify the user that they've hit this edgecase and encourage them to address it. + */ + close(interceptor.emergencyBailout) + content = <-interceptor.interceptedContent + BAILOUT_MESSAGE + } + + interceptor.accumulatedOutput += content + interceptor.intercepting = false +} + +func (interceptor *genericOutputInterceptor) Shutdown() { + interceptor.PauseIntercepting() + + if interceptor.stdoutClone != nil { + close(interceptor.shutdown) + interceptor.implementation.ShutdownClones(interceptor.stdoutClone, interceptor.stderrClone) + interceptor.stdoutClone = nil + interceptor.stderrClone = nil + } +} + +/* This is used on windows builds but included here so it can be explicitly tested on unix systems too */ +func NewOSGlobalReassigningOutputInterceptor() OutputInterceptor { + return &genericOutputInterceptor{ + interceptedContent: make(chan string), + pipeChannel: make(chan pipePair), + shutdown: make(chan interface{}), + implementation: &osGlobalReassigningOutputInterceptorImpl{}, + } +} + +type osGlobalReassigningOutputInterceptorImpl struct{} + +func (impl *osGlobalReassigningOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) { + return os.Stdout, os.Stderr +} + +func (impl *osGlobalReassigningOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) { + os.Stdout = pipeWriter + os.Stderr = pipeWriter +} + +func (impl *osGlobalReassigningOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) { + os.Stdout = stdoutClone + os.Stderr = stderrClone +} + +func (impl *osGlobalReassigningOutputInterceptorImpl) ShutdownClones(_ *os.File, _ *os.File) { + //noop +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go new file mode 100644 index 00000000..e875001c --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go @@ -0,0 +1,62 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris +// +build freebsd openbsd netbsd dragonfly darwin linux solaris + +package internal + +import ( + "os" + + "golang.org/x/sys/unix" +) + +func 
NewOutputInterceptor() OutputInterceptor { + return &genericOutputInterceptor{ + interceptedContent: make(chan string), + pipeChannel: make(chan pipePair), + shutdown: make(chan interface{}), + implementation: &dupSyscallOutputInterceptorImpl{}, + } +} + +type dupSyscallOutputInterceptorImpl struct{} + +func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) { + // To clone stdout and stderr we: + // First, create two clone file descriptors that point to the stdout and stderr file descriptions + stdoutCloneFD, _ := unix.Dup(1) + stderrCloneFD, _ := unix.Dup(2) + + // And then wrap the clone file descriptors in files. + // One benefit of this (that we don't use yet) is that we can actually write + // to these files to emit output to the console evne though we're intercepting output + stdoutClone := os.NewFile(uintptr(stdoutCloneFD), "stdout-clone") + stderrClone := os.NewFile(uintptr(stderrCloneFD), "stderr-clone") + + //these clones remain alive throughout the lifecycle of the suite and don't need to be recreated + //this speeds things up a bit, actually. + return stdoutClone, stderrClone +} + +func (impl *dupSyscallOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) { + // To redirect output to our pipe we need to point the 1 and 2 file descriptors (which is how the world tries to log things) + // to the write end of the pipe. + // We do this with Dup2 (possibly Dup3 on some architectures) to have file descriptors 1 and 2 point to the same file description as the pipeWriter + // This effectively shunts data written to stdout and stderr to the write end of our pipe + unix.Dup2(int(pipeWriter.Fd()), 1) + unix.Dup2(int(pipeWriter.Fd()), 2) +} + +func (impl *dupSyscallOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) { + // To restore stdour/stderr from the clones we have the 1 and 2 file descriptors + // point to the original file descriptions that we saved off in the clones. + // This has the added benefit of closing the connection between these descriptors and the write end of the pipe + // which is important to cause the io.Copy on the pipe.Reader to end. 
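For readers unfamiliar with the dup/dup2 dance these comments describe, here is a minimal standalone sketch (not part of the vendored file; it assumes a platform where unix.Dup2 is available, per the note above about Dup3 on some architectures) that clones file descriptor 1, points it at a pipe, and then restores it:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Clone the current stdout file description so it can be restored later.
	stdoutCloneFD, _ := unix.Dup(1)

	// Create a pipe and make file descriptor 1 refer to its write end.
	reader, writer, _ := os.Pipe()
	unix.Dup2(int(writer.Fd()), 1)

	fmt.Println("captured") // lands in the pipe, not on the terminal

	// Restore fd 1 from the clone, then close the write end so the reader sees EOF.
	unix.Dup2(stdoutCloneFD, 1)
	writer.Close()

	buf := make([]byte, 64)
	n, _ := reader.Read(buf)
	fmt.Printf("intercepted: %q\n", buf[:n]) // now goes to the restored stdout
}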
+ unix.Dup2(int(stdoutClone.Fd()), 1) + unix.Dup2(int(stderrClone.Fd()), 2) +} + +func (impl *dupSyscallOutputInterceptorImpl) ShutdownClones(stdoutClone *os.File, stderrClone *os.File) { + // We're done with the clones so we can close them to clean up after ourselves + stdoutClone.Close() + stderrClone.Close() +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go new file mode 100644 index 00000000..30c2851a --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go @@ -0,0 +1,7 @@ +// +build windows + +package internal + +func NewOutputInterceptor() OutputInterceptor { + return NewOSGlobalReassigningOutputInterceptor() +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go new file mode 100644 index 00000000..7d5cb0b6 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go @@ -0,0 +1,69 @@ +package parallel_support + +import ( + "fmt" + "io" + "os" + "time" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +type BeforeSuiteState struct { + Data []byte + State types.SpecState +} + +type ParallelIndexCounter struct { + Index int +} + +var ErrorGone = fmt.Errorf("gone") +var ErrorFailed = fmt.Errorf("failed") +var ErrorEarly = fmt.Errorf("early") + +var POLLING_INTERVAL = 50 * time.Millisecond + +type Server interface { + Start() + Close() + Address() string + RegisterAlive(node int, alive func() bool) + GetSuiteDone() chan interface{} + GetOutputDestination() io.Writer + SetOutputDestination(io.Writer) +} + +type Client interface { + Connect() bool + Close() error + + PostSuiteWillBegin(report types.Report) error + PostDidRun(report types.SpecReport) error + PostSuiteDidEnd(report types.Report) error + PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error + BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) + BlockUntilNonprimaryProcsHaveFinished() error + BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) + FetchNextCounter() (int, error) + PostAbort() error + ShouldAbort() bool + Write(p []byte) (int, error) +} + +func NewServer(parallelTotal int, reporter reporters.Reporter) (Server, error) { + if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" { + return newHttpServer(parallelTotal, reporter) + } else { + return newRPCServer(parallelTotal, reporter) + } +} + +func NewClient(serverHost string) Client { + if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" { + return newHttpClient(serverHost) + } else { + return newRPCClient(serverHost) + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go new file mode 100644 index 00000000..d076d5d1 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go @@ -0,0 +1,152 @@ +package parallel_support + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type httpClient struct { + serverHost string +} + +func newHttpClient(serverHost string) *httpClient { + return &httpClient{ + serverHost: serverHost, + } +} + +func (client *httpClient) Connect() bool { + resp, err := http.Get(client.serverHost + "/up") + if err != nil { + return 
false + } + resp.Body.Close() + return resp.StatusCode == http.StatusOK +} + +func (client *httpClient) Close() error { + return nil +} + +func (client *httpClient) post(path string, data interface{}) error { + var body io.Reader + if data != nil { + encoded, err := json.Marshal(data) + if err != nil { + return err + } + body = bytes.NewBuffer(encoded) + } + resp, err := http.Post(client.serverHost+path, "application/json", body) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("received unexpected status code %d", resp.StatusCode) + } + return nil +} + +func (client *httpClient) poll(path string, data interface{}) error { + for { + resp, err := http.Get(client.serverHost + path) + if err != nil { + return err + } + if resp.StatusCode == http.StatusTooEarly { + resp.Body.Close() + time.Sleep(POLLING_INTERVAL) + continue + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusGone { + return ErrorGone + } + if resp.StatusCode == http.StatusFailedDependency { + return ErrorFailed + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("received unexpected status code %d", resp.StatusCode) + } + if data != nil { + return json.NewDecoder(resp.Body).Decode(data) + } + return nil + } +} + +func (client *httpClient) PostSuiteWillBegin(report types.Report) error { + return client.post("/suite-will-begin", report) +} + +func (client *httpClient) PostDidRun(report types.SpecReport) error { + return client.post("/did-run", report) +} + +func (client *httpClient) PostSuiteDidEnd(report types.Report) error { + return client.post("/suite-did-end", report) +} + +func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { + beforeSuiteState := BeforeSuiteState{ + State: state, + Data: data, + } + return client.post("/before-suite-completed", beforeSuiteState) +} + +func (client *httpClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) { + var beforeSuiteState BeforeSuiteState + err := client.poll("/before-suite-state", &beforeSuiteState) + if err == ErrorGone { + return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1() + } + return beforeSuiteState.State, beforeSuiteState.Data, err +} + +func (client *httpClient) BlockUntilNonprimaryProcsHaveFinished() error { + return client.poll("/have-nonprimary-procs-finished", nil) +} + +func (client *httpClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) { + var report types.Report + err := client.poll("/aggregated-nonprimary-procs-report", &report) + if err == ErrorGone { + return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing() + } + return report, err +} + +func (client *httpClient) FetchNextCounter() (int, error) { + var counter ParallelIndexCounter + err := client.poll("/counter", &counter) + return counter.Index, err +} + +func (client *httpClient) PostAbort() error { + return client.post("/abort", nil) +} + +func (client *httpClient) ShouldAbort() bool { + err := client.poll("/abort", nil) + if err == ErrorGone { + return true + } + return false +} + +func (client *httpClient) Write(p []byte) (int, error) { + resp, err := http.Post(client.serverHost+"/emit-output", "text/plain;charset=UTF-8 ", bytes.NewReader(p)) + resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return 0, fmt.Errorf("failed to emit output") + } + return len(p), err +} diff --git 
a/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go new file mode 100644 index 00000000..ca1dcdca --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go @@ -0,0 +1,214 @@ +/* + +The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. +This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). + +*/ + +package parallel_support + +import ( + "encoding/json" + "io" + "net" + "net/http" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +/* +httpServer spins up on an automatically selected port and listens for communication from the forwarding reporter. +It then forwards that communication to attached reporters. +*/ +type httpServer struct { + listener net.Listener + handler *ServerHandler +} + +//Create a new server, automatically selecting a port +func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + return &httpServer{ + listener: listener, + handler: newServerHandler(parallelTotal, reporter), + }, nil +} + +//Start the server. You don't need to `go s.Start()`, just `s.Start()` +func (server *httpServer) Start() { + httpServer := &http.Server{} + mux := http.NewServeMux() + httpServer.Handler = mux + + //streaming endpoints + mux.HandleFunc("/suite-will-begin", server.specSuiteWillBegin) + mux.HandleFunc("/did-run", server.didRun) + mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd) + mux.HandleFunc("/emit-output", server.emitOutput) + + //synchronization endpoints + mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted) + mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState) + mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished) + mux.HandleFunc("/aggregated-nonprimary-procs-report", server.handleAggregatedNonprimaryProcsReport) + mux.HandleFunc("/counter", server.handleCounter) + mux.HandleFunc("/up", server.handleUp) + mux.HandleFunc("/abort", server.handleAbort) + + go httpServer.Serve(server.listener) +} + +//Stop the server +func (server *httpServer) Close() { + server.listener.Close() +} + +//The address the server can be reached it. Pass this into the `ForwardingReporter`. 
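The synchronization endpoints registered above follow a simple long-poll contract: the server answers StatusTooEarly until the requested data is ready, StatusGone if the process that would have produced it has disappeared, StatusFailedDependency on failure, and StatusOK once the data is available. A minimal standalone sketch of the client side of that contract (using a hypothetical httptest stand-in rather than the real handler) looks like this:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"
)

func main() {
	attempts := 0
	// A stand-in endpoint that reports "too early" twice before succeeding,
	// mimicking the long-poll behavior of the synchronization endpoints.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		attempts++
		if attempts <= 2 {
			w.WriteHeader(http.StatusTooEarly)
			return
		}
		w.WriteHeader(http.StatusOK)
	}))
	defer server.Close()

	for {
		resp, err := http.Get(server.URL + "/have-nonprimary-procs-finished")
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
		if resp.StatusCode == http.StatusTooEarly {
			time.Sleep(10 * time.Millisecond) // keep polling, as with POLLING_INTERVAL
			continue
		}
		fmt.Println("synchronization point reached")
		return
	}
}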
+func (server *httpServer) Address() string { + return "http://" + server.listener.Addr().String() +} + +func (server *httpServer) GetSuiteDone() chan interface{} { + return server.handler.done +} + +func (server *httpServer) GetOutputDestination() io.Writer { + return server.handler.outputDestination +} + +func (server *httpServer) SetOutputDestination(w io.Writer) { + server.handler.outputDestination = w +} + +func (server *httpServer) RegisterAlive(node int, alive func() bool) { + server.handler.registerAlive(node, alive) +} + +// +// Streaming Endpoints +// + +//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` +func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool { + defer request.Body.Close() + if json.NewDecoder(request.Body).Decode(object) != nil { + writer.WriteHeader(http.StatusBadRequest) + return false + } + return true +} + +func (server *httpServer) handleError(err error, writer http.ResponseWriter) bool { + if err == nil { + return false + } + switch err { + case ErrorEarly: + writer.WriteHeader(http.StatusTooEarly) + case ErrorGone: + writer.WriteHeader(http.StatusGone) + case ErrorFailed: + writer.WriteHeader(http.StatusFailedDependency) + default: + writer.WriteHeader(http.StatusInternalServerError) + } + return true +} + +func (server *httpServer) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) { + var report types.Report + if !server.decode(writer, request, &report) { + return + } + + server.handleError(server.handler.SpecSuiteWillBegin(report, voidReceiver), writer) +} + +func (server *httpServer) didRun(writer http.ResponseWriter, request *http.Request) { + var report types.SpecReport + if !server.decode(writer, request, &report) { + return + } + + server.handleError(server.handler.DidRun(report, voidReceiver), writer) +} + +func (server *httpServer) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) { + var report types.Report + if !server.decode(writer, request, &report) { + return + } + server.handleError(server.handler.SpecSuiteDidEnd(report, voidReceiver), writer) +} + +func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.Request) { + output, err := io.ReadAll(request.Body) + if err != nil { + writer.WriteHeader(http.StatusInternalServerError) + return + } + var n int + server.handleError(server.handler.EmitOutput(output, &n), writer) +} + +func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { + var beforeSuiteState BeforeSuiteState + if !server.decode(writer, request, &beforeSuiteState) { + return + } + + server.handleError(server.handler.BeforeSuiteCompleted(beforeSuiteState, voidReceiver), writer) +} + +func (server *httpServer) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + var beforeSuiteState BeforeSuiteState + if server.handleError(server.handler.BeforeSuiteState(voidSender, &beforeSuiteState), writer) { + return + } + json.NewEncoder(writer).Encode(beforeSuiteState) +} + +func (server *httpServer) handleHaveNonprimaryProcsFinished(writer http.ResponseWriter, request *http.Request) { + if server.handleError(server.handler.HaveNonprimaryProcsFinished(voidSender, voidReceiver), writer) { + return + } + writer.WriteHeader(http.StatusOK) +} + +func (server *httpServer) handleAggregatedNonprimaryProcsReport(writer http.ResponseWriter, request *http.Request) { + var aggregatedReport types.Report + if 
server.handleError(server.handler.AggregatedNonprimaryProcsReport(voidSender, &aggregatedReport), writer) { + return + } + json.NewEncoder(writer).Encode(aggregatedReport) +} + +func (server *httpServer) handleCounter(writer http.ResponseWriter, request *http.Request) { + var n int + if server.handleError(server.handler.Counter(voidSender, &n), writer) { + return + } + json.NewEncoder(writer).Encode(ParallelIndexCounter{Index: n}) +} + +func (server *httpServer) handleUp(writer http.ResponseWriter, request *http.Request) { + writer.WriteHeader(http.StatusOK) +} + +func (server *httpServer) handleAbort(writer http.ResponseWriter, request *http.Request) { + if request.Method == "GET" { + var shouldAbort bool + server.handler.ShouldAbort(voidSender, &shouldAbort) + if shouldAbort { + writer.WriteHeader(http.StatusGone) + } else { + writer.WriteHeader(http.StatusOK) + } + } else { + server.handler.Abort(voidSender, voidReceiver) + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go new file mode 100644 index 00000000..4e83b097 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go @@ -0,0 +1,119 @@ +package parallel_support + +import ( + "net/rpc" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type rpcClient struct { + serverHost string + client *rpc.Client +} + +func newRPCClient(serverHost string) *rpcClient { + return &rpcClient{ + serverHost: serverHost, + } +} + +func (client *rpcClient) Connect() bool { + var err error + if client.client != nil { + return true + } + client.client, err = rpc.DialHTTPPath("tcp", client.serverHost, "/") + if err != nil { + client.client = nil + return false + } + return true +} + +func (client *rpcClient) Close() error { + return client.client.Close() +} + +func (client *rpcClient) poll(method string, data interface{}) error { + for { + err := client.client.Call(method, voidSender, data) + if err == nil { + return nil + } + switch err.Error() { + case ErrorEarly.Error(): + time.Sleep(POLLING_INTERVAL) + case ErrorGone.Error(): + return ErrorGone + case ErrorFailed.Error(): + return ErrorFailed + default: + return err + } + } +} + +func (client *rpcClient) PostSuiteWillBegin(report types.Report) error { + return client.client.Call("Server.SpecSuiteWillBegin", report, voidReceiver) +} + +func (client *rpcClient) PostDidRun(report types.SpecReport) error { + return client.client.Call("Server.DidRun", report, voidReceiver) +} + +func (client *rpcClient) PostSuiteDidEnd(report types.Report) error { + return client.client.Call("Server.SpecSuiteDidEnd", report, voidReceiver) +} + +func (client *rpcClient) Write(p []byte) (int, error) { + var n int + err := client.client.Call("Server.EmitOutput", p, &n) + return n, err +} + +func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { + beforeSuiteState := BeforeSuiteState{ + State: state, + Data: data, + } + return client.client.Call("Server.BeforeSuiteCompleted", beforeSuiteState, voidReceiver) +} + +func (client *rpcClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) { + var beforeSuiteState BeforeSuiteState + err := client.poll("Server.BeforeSuiteState", &beforeSuiteState) + if err == ErrorGone { + return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1() + } + return beforeSuiteState.State, beforeSuiteState.Data, err +} + +func 
(client *rpcClient) BlockUntilNonprimaryProcsHaveFinished() error { + return client.poll("Server.HaveNonprimaryProcsFinished", voidReceiver) +} + +func (client *rpcClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) { + var report types.Report + err := client.poll("Server.AggregatedNonprimaryProcsReport", &report) + if err == ErrorGone { + return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing() + } + return report, err +} + +func (client *rpcClient) FetchNextCounter() (int, error) { + var counter int + err := client.client.Call("Server.Counter", voidSender, &counter) + return counter, err +} + +func (client *rpcClient) PostAbort() error { + return client.client.Call("Server.Abort", voidSender, voidReceiver) +} + +func (client *rpcClient) ShouldAbort() bool { + var shouldAbort bool + client.client.Call("Server.ShouldAbort", voidSender, &shouldAbort) + return shouldAbort +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go new file mode 100644 index 00000000..2620fd56 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go @@ -0,0 +1,75 @@ +/* + +The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. +This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). + +*/ + +package parallel_support + +import ( + "io" + "net" + "net/http" + "net/rpc" + + "github.com/onsi/ginkgo/v2/reporters" +) + +/* +RPCServer spins up on an automatically selected port and listens for communication from the forwarding reporter. +It then forwards that communication to attached reporters. +*/ +type RPCServer struct { + listener net.Listener + handler *ServerHandler +} + +//Create a new server, automatically selecting a port +func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + return &RPCServer{ + listener: listener, + handler: newServerHandler(parallelTotal, reporter), + }, nil +} + +//Start the server. You don't need to `go s.Start()`, just `s.Start()` +func (server *RPCServer) Start() { + rpcServer := rpc.NewServer() + rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server + + httpServer := &http.Server{} + httpServer.Handler = rpcServer + + go httpServer.Serve(server.listener) +} + +//Stop the server +func (server *RPCServer) Close() { + server.listener.Close() +} + +//The address the server can be reached it. Pass this into the `ForwardingReporter`. 
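The RPC flavor of this protocol pairs an rpc.Server (registered under the name "Server" and served over HTTP via CONNECT hijacking) with rpc.DialHTTPPath on the client side. A minimal standalone sketch of that pairing, with a hypothetical handler exposing a single Counter method in the same func (t *T) Method(args A, reply *R) error shape used by ServerHandler, looks like this:

package main

import (
	"fmt"
	"net"
	"net/http"
	"net/rpc"
)

type Void struct{}

// A stand-in handler whose exported methods follow net/rpc's calling convention.
type Handler struct{}

func (h *Handler) Counter(_ Void, counter *int) error {
	*counter = 42
	return nil
}

func main() {
	rpcServer := rpc.NewServer()
	if err := rpcServer.RegisterName("Server", &Handler{}); err != nil {
		panic(err)
	}

	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	// rpc.Server implements http.Handler by hijacking CONNECT requests.
	go http.Serve(listener, rpcServer)

	client, err := rpc.DialHTTPPath("tcp", listener.Addr().String(), "/")
	if err != nil {
		panic(err)
	}
	var n int
	if err := client.Call("Server.Counter", Void{}, &n); err != nil {
		panic(err)
	}
	fmt.Println("counter:", n)
}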
+func (server *RPCServer) Address() string { + return server.listener.Addr().String() +} + +func (server *RPCServer) GetSuiteDone() chan interface{} { + return server.handler.done +} + +func (server *RPCServer) GetOutputDestination() io.Writer { + return server.handler.outputDestination +} + +func (server *RPCServer) SetOutputDestination(w io.Writer) { + server.handler.outputDestination = w +} + +func (server *RPCServer) RegisterAlive(node int, alive func() bool) { + server.handler.registerAlive(node, alive) +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go new file mode 100644 index 00000000..ca471cf3 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go @@ -0,0 +1,202 @@ +package parallel_support + +import ( + "io" + "os" + "sync" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +type Void struct{} + +var voidReceiver *Void = &Void{} +var voidSender Void + +// ServerHandler is an RPC-compatible handler that is shared between the http server and the rpc server. +// It handles all the business logic to avoid duplication between the two servers + +type ServerHandler struct { + done chan interface{} + outputDestination io.Writer + reporter reporters.Reporter + alives []func() bool + lock *sync.Mutex + beforeSuiteState BeforeSuiteState + parallelTotal int + counter int + counterLock *sync.Mutex + shouldAbort bool + + numSuiteDidBegins int + numSuiteDidEnds int + aggregatedReport types.Report + reportHoldingArea []types.SpecReport +} + +func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler { + return &ServerHandler{ + reporter: reporter, + lock: &sync.Mutex{}, + counterLock: &sync.Mutex{}, + alives: make([]func() bool, parallelTotal), + beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid}, + parallelTotal: parallelTotal, + outputDestination: os.Stdout, + done: make(chan interface{}), + } +} + +func (handler *ServerHandler) SpecSuiteWillBegin(report types.Report, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.numSuiteDidBegins += 1 + + // all summaries are identical, so it's fine to simply emit the last one of these + if handler.numSuiteDidBegins == handler.parallelTotal { + handler.reporter.SuiteWillBegin(report) + + for _, summary := range handler.reportHoldingArea { + handler.reporter.WillRun(summary) + handler.reporter.DidRun(summary) + } + + handler.reportHoldingArea = nil + } + + return nil +} + +func (handler *ServerHandler) DidRun(report types.SpecReport, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + if handler.numSuiteDidBegins == handler.parallelTotal { + handler.reporter.WillRun(report) + handler.reporter.DidRun(report) + } else { + handler.reportHoldingArea = append(handler.reportHoldingArea, report) + } + + return nil +} + +func (handler *ServerHandler) SpecSuiteDidEnd(report types.Report, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.numSuiteDidEnds += 1 + if handler.numSuiteDidEnds == 1 { + handler.aggregatedReport = report + } else { + handler.aggregatedReport = handler.aggregatedReport.Add(report) + } + + if handler.numSuiteDidEnds == handler.parallelTotal { + handler.reporter.SuiteDidEnd(handler.aggregatedReport) + close(handler.done) + } + + return nil +} + +func (handler *ServerHandler) EmitOutput(output []byte, n *int) 
error { + var err error + *n, err = handler.outputDestination.Write(output) + return err +} + +func (handler *ServerHandler) registerAlive(proc int, alive func() bool) { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.alives[proc-1] = alive +} + +func (handler *ServerHandler) procIsAlive(proc int) bool { + handler.lock.Lock() + defer handler.lock.Unlock() + alive := handler.alives[proc-1] + if alive == nil { + return true + } + return alive() +} + +func (handler *ServerHandler) haveNonprimaryProcsFinished() bool { + for i := 2; i <= handler.parallelTotal; i++ { + if handler.procIsAlive(i) { + return false + } + } + return true +} + +func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.beforeSuiteState = beforeSuiteState + + return nil +} + +func (handler *ServerHandler) BeforeSuiteState(_ Void, beforeSuiteState *BeforeSuiteState) error { + proc1IsAlive := handler.procIsAlive(1) + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.beforeSuiteState.State == types.SpecStateInvalid { + if proc1IsAlive { + return ErrorEarly + } else { + return ErrorGone + } + } + *beforeSuiteState = handler.beforeSuiteState + return nil +} + +func (handler *ServerHandler) HaveNonprimaryProcsFinished(_ Void, _ *Void) error { + if handler.haveNonprimaryProcsFinished() { + return nil + } else { + return ErrorEarly + } +} + +func (handler *ServerHandler) AggregatedNonprimaryProcsReport(_ Void, report *types.Report) error { + if handler.haveNonprimaryProcsFinished() { + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.numSuiteDidEnds == handler.parallelTotal-1 { + *report = handler.aggregatedReport + return nil + } else { + return ErrorGone + } + } else { + return ErrorEarly + } +} + +func (handler *ServerHandler) Counter(_ Void, counter *int) error { + handler.counterLock.Lock() + defer handler.counterLock.Unlock() + *counter = handler.counter + handler.counter++ + return nil +} + +func (handler *ServerHandler) Abort(_ Void, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.shouldAbort = true + return nil +} + +func (handler *ServerHandler) ShouldAbort(_ Void, shouldAbort *bool) error { + handler.lock.Lock() + defer handler.lock.Unlock() + *shouldAbort = handler.shouldAbort + return nil +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go new file mode 100644 index 00000000..74199f39 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go @@ -0,0 +1,40 @@ +package internal + +import ( + "reflect" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type ReportEntry = types.ReportEntry + +func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (ReportEntry, error) { + out := ReportEntry{ + Visibility: types.ReportEntryVisibilityAlways, + Name: name, + Time: time.Now(), + Location: cl, + } + var didSetValue = false + for _, arg := range args { + switch reflect.TypeOf(arg) { + case reflect.TypeOf(types.ReportEntryVisibilityAlways): + out.Visibility = arg.(types.ReportEntryVisibility) + case reflect.TypeOf(types.CodeLocation{}): + out.Location = arg.(types.CodeLocation) + case reflect.TypeOf(Offset(0)): + out.Location = types.NewCodeLocation(2 + int(arg.(Offset))) + case reflect.TypeOf(out.Time): + out.Time = arg.(time.Time) + default: + if didSetValue { + return ReportEntry{}, 
types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg) + } + out.Value = types.WrapEntryValue(arg) + didSetValue = true + } + } + + return out, nil +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/spec.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/spec.go new file mode 100644 index 00000000..92072edd --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/spec.go @@ -0,0 +1,71 @@ +package internal + +import ( + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +type Spec struct { + Nodes Nodes + Skip bool +} + +func (s Spec) SubjectID() uint { + return s.Nodes.FirstNodeWithType(types.NodeTypeIt).ID +} + +func (s Spec) Text() string { + texts := []string{} + for i := range s.Nodes { + if s.Nodes[i].Text != "" { + texts = append(texts, s.Nodes[i].Text) + } + } + return strings.Join(texts, " ") +} + +func (s Spec) FirstNodeWithType(nodeTypes types.NodeType) Node { + return s.Nodes.FirstNodeWithType(nodeTypes) +} + +func (s Spec) FlakeAttempts() int { + flakeAttempts := 0 + for i := range s.Nodes { + if s.Nodes[i].FlakeAttempts > 0 { + flakeAttempts = s.Nodes[i].FlakeAttempts + } + } + + return flakeAttempts +} + +type Specs []Spec + +func (s Specs) HasAnySpecsMarkedPending() bool { + for i := range s { + if s[i].Nodes.HasNodeMarkedPending() { + return true + } + } + + return false +} + +func (s Specs) CountWithoutSkip() int { + n := 0 + for i := range s { + if !s[i].Skip { + n += 1 + } + } + return n +} + +func (s Specs) AtIndices(indices SpecIndices) Specs { + out := make(Specs, len(indices)) + for i, idx := range indices { + out[i] = s[idx] + } + return out +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/suite.go new file mode 100644 index 00000000..a521ccbd --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -0,0 +1,629 @@ +package internal + +import ( + "fmt" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/internal/parallel_support" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +type Phase uint + +const ( + PhaseBuildTopLevel Phase = iota + PhaseBuildTree + PhaseRun +) + +type Suite struct { + tree *TreeNode + topLevelContainers Nodes + + phase Phase + + suiteNodes Nodes + cleanupNodes Nodes + + failer *Failer + reporter reporters.Reporter + writer WriterInterface + outputInterceptor OutputInterceptor + interruptHandler interrupt_handler.InterruptHandlerInterface + config types.SuiteConfig + + skipAll bool + report types.Report + currentSpecReport types.SpecReport + currentNode Node + + client parallel_support.Client +} + +func NewSuite() *Suite { + return &Suite{ + tree: &TreeNode{}, + phase: PhaseBuildTopLevel, + } +} + +func (suite *Suite) BuildTree() error { + // During PhaseBuildTopLevel, the top level containers are stored in suite.topLevelCotainers and entered + // We now enter PhaseBuildTree where these top level containers are entered and added to the spec tree + suite.phase = PhaseBuildTree + for _, topLevelContainer := range suite.topLevelContainers { + err := suite.PushNode(topLevelContainer) + if err != nil { + return err + } + } + return nil +} + +func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client 
parallel_support.Client, suiteConfig types.SuiteConfig) (bool, bool) { + if suite.phase != PhaseBuildTree { + panic("cannot run before building the tree = call suite.BuildTree() first") + } + ApplyNestedFocusPolicyToTree(suite.tree) + specs := GenerateSpecsFromTreeRoot(suite.tree) + specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig) + + suite.phase = PhaseRun + suite.client = client + suite.failer = failer + suite.reporter = reporter + suite.writer = writer + suite.outputInterceptor = outputInterceptor + suite.interruptHandler = interruptHandler + suite.config = suiteConfig + + success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs) + + return success, hasProgrammaticFocus +} + +func (suite *Suite) InRunPhase() bool { + return suite.phase == PhaseRun +} + +/* + Tree Construction methods + + PushNode is used during PhaseBuildTopLevel and PhaseBuildTree +*/ + +func (suite *Suite) PushNode(node Node) error { + if node.NodeType.Is(types.NodeTypeCleanupInvalid | types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) { + return suite.pushCleanupNode(node) + } + + if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportAfterSuite) { + return suite.pushSuiteNode(node) + } + + if suite.phase == PhaseRun { + return types.GinkgoErrors.PushingNodeInRunPhase(node.NodeType, node.CodeLocation) + } + + if node.MarkedSerial { + firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered() + if !firstOrderedNode.IsZero() && !firstOrderedNode.MarkedSerial { + return types.GinkgoErrors.InvalidSerialNodeInNonSerialOrderedContainer(node.CodeLocation, node.NodeType) + } + } + + if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) { + firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered() + if firstOrderedNode.IsZero() { + return types.GinkgoErrors.SetupNodeNotInOrderedContainer(node.CodeLocation, node.NodeType) + } + } + + if node.NodeType == types.NodeTypeContainer { + // During PhaseBuildTopLevel we only track the top level containers without entering them + // We only enter the top level container nodes during PhaseBuildTree + // + // This ensures the tree is only constructed after `go spec` has called `flag.Parse()` and gives + // the user an opportunity to load suiteConfiguration information in the `TestX` go spec hook just before `RunSpecs` + // is invoked. This makes the lifecycle easier to reason about and solves issues like #693. 
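A typical Ginkgo bootstrap illustrates why this two-phase construction matters: containers declared at package scope are merely recorded during PhaseBuildTopLevel, and their closures only run once RunSpecs builds the tree inside the TestX hook, after test flags have been parsed. A minimal illustrative suite (package and suite names are made up for the example):

package books_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// Top-level containers are registered at package init time (PhaseBuildTopLevel)...
var _ = Describe("Books", func() {
	// ...but this closure only runs when RunSpecs enters the container to build
	// the tree (PhaseBuildTree), i.e. after `go test` has parsed its flags, so
	// suite configuration is already available here.
	It("can be checked out", func() {
		Expect(true).To(BeTrue())
	})
})

func TestBooks(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Books Suite")
}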
+ if suite.phase == PhaseBuildTopLevel { + suite.topLevelContainers = append(suite.topLevelContainers, node) + return nil + } + if suite.phase == PhaseBuildTree { + parentTree := suite.tree + suite.tree = &TreeNode{Node: node} + parentTree.AppendChild(suite.tree) + err := func() (err error) { + defer func() { + if e := recover(); e != nil { + err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation) + } + }() + node.Body() + return err + }() + suite.tree = parentTree + return err + } + } else { + suite.tree.AppendChild(&TreeNode{Node: node}) + return nil + } + + return nil +} + +func (suite *Suite) pushSuiteNode(node Node) error { + if suite.phase == PhaseBuildTree { + return types.GinkgoErrors.SuiteNodeInNestedContext(node.NodeType, node.CodeLocation) + } + + if suite.phase == PhaseRun { + return types.GinkgoErrors.SuiteNodeDuringRunPhase(node.NodeType, node.CodeLocation) + } + + switch node.NodeType { + case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite: + existingBefores := suite.suiteNodes.WithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite) + if len(existingBefores) > 0 { + return types.GinkgoErrors.MultipleBeforeSuiteNodes(node.NodeType, node.CodeLocation, existingBefores[0].NodeType, existingBefores[0].CodeLocation) + } + case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite: + existingAfters := suite.suiteNodes.WithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite) + if len(existingAfters) > 0 { + return types.GinkgoErrors.MultipleAfterSuiteNodes(node.NodeType, node.CodeLocation, existingAfters[0].NodeType, existingAfters[0].CodeLocation) + } + } + + suite.suiteNodes = append(suite.suiteNodes, node) + return nil +} + +func (suite *Suite) pushCleanupNode(node Node) error { + if suite.phase != PhaseRun || suite.currentNode.IsZero() { + return types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(node.CodeLocation) + } + + switch suite.currentNode.NodeType { + case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite, types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite: + node.NodeType = types.NodeTypeCleanupAfterSuite + case types.NodeTypeBeforeAll, types.NodeTypeAfterAll: + node.NodeType = types.NodeTypeCleanupAfterAll + case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportAfterSuite: + return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType) + case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite: + return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation) + default: + node.NodeType = types.NodeTypeCleanupAfterEach + } + + node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID + node.NestingLevel = suite.currentNode.NestingLevel + suite.cleanupNodes = append(suite.cleanupNodes, node) + + return nil +} + +/* + Spec Running methods - used during PhaseRun +*/ +func (suite *Suite) CurrentSpecReport() types.SpecReport { + report := suite.currentSpecReport + if suite.writer != nil { + report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) + } + return report +} + +func (suite *Suite) AddReportEntry(entry ReportEntry) error { + if suite.phase != PhaseRun { + return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location) + } + suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry) + return nil +} + +func (suite *Suite) 
isRunningInParallel() bool { + return suite.config.ParallelTotal > 1 +} + +func (suite *Suite) processCurrentSpecReport() { + suite.reporter.DidRun(suite.currentSpecReport) + if suite.isRunningInParallel() { + suite.client.PostDidRun(suite.currentSpecReport) + } + suite.report.SpecReports = append(suite.report.SpecReports, suite.currentSpecReport) + + if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { + suite.report.SuiteSucceeded = false + if suite.config.FailFast || suite.currentSpecReport.State.Is(types.SpecStateAborted) { + suite.skipAll = true + if suite.isRunningInParallel() { + suite.client.PostAbort() + } + } + } +} + +func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool { + numSpecsThatWillBeRun := specs.CountWithoutSkip() + + suite.report = types.Report{ + SuitePath: suitePath, + SuiteDescription: description, + SuiteLabels: suiteLabels, + SuiteConfig: suite.config, + SuiteHasProgrammaticFocus: hasProgrammaticFocus, + PreRunStats: types.PreRunStats{ + TotalSpecs: len(specs), + SpecsThatWillRun: numSpecsThatWillBeRun, + }, + StartTime: time.Now(), + } + + suite.reporter.SuiteWillBegin(suite.report) + if suite.isRunningInParallel() { + suite.client.PostSuiteWillBegin(suite.report) + } + + suite.report.SuiteSucceeded = true + suite.runBeforeSuite(numSpecsThatWillBeRun) + + if suite.report.SuiteSucceeded { + groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config) + nextIndex := MakeIncrementingIndexCounter() + if suite.isRunningInParallel() { + nextIndex = suite.client.FetchNextCounter + } + + for { + groupedSpecIdx, err := nextIndex() + if err != nil { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, fmt.Sprintf("Failed to iterate over specs:\n%s", err.Error())) + suite.report.SuiteSucceeded = false + break + } + + if groupedSpecIdx >= len(groupedSpecIndices) { + if suite.config.ParallelProcess == 1 && len(serialGroupedSpecIndices) > 0 { + groupedSpecIndices, serialGroupedSpecIndices, nextIndex = serialGroupedSpecIndices, GroupedSpecIndices{}, MakeIncrementingIndexCounter() + suite.client.BlockUntilNonprimaryProcsHaveFinished() + continue + } + break + } + + // the complexity for running groups of specs is very high because of Ordered containers and FlakeAttempts + // we encapsulate that complexity in the notion of a Group that can run + // Group is really just an extension of suite so it gets passed a suite and has access to all its internals + // Note that group is stateful and intended for single use! 
+ newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx])) + } + + if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set") + suite.report.SuiteSucceeded = false + } + } + + suite.runAfterSuiteCleanup(numSpecsThatWillBeRun) + + interruptStatus := suite.interruptHandler.Status() + if interruptStatus.Interrupted { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String()) + suite.report.SuiteSucceeded = false + } + suite.report.EndTime = time.Now() + suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime) + + if suite.config.ParallelProcess == 1 { + suite.runReportAfterSuite() + } + suite.reporter.SuiteDidEnd(suite.report) + if suite.isRunningInParallel() { + suite.client.PostSuiteDidEnd(suite.report) + } + + return suite.report.SuiteSucceeded +} + +func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) { + interruptStatus := suite.interruptHandler.Status() + beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite) + if !beforeSuiteNode.IsZero() && !interruptStatus.Interrupted && numSpecsThatWillBeRun > 0 { + suite.currentSpecReport = types.SpecReport{ + LeafNodeType: beforeSuiteNode.NodeType, + LeafNodeLocation: beforeSuiteNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + } + suite.reporter.WillRun(suite.currentSpecReport) + suite.runSuiteNode(beforeSuiteNode, interruptStatus.Channel) + if suite.currentSpecReport.State.Is(types.SpecStateSkipped) { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite") + suite.skipAll = true + } + suite.processCurrentSpecReport() + } +} + +func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) { + afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite) + if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 { + suite.currentSpecReport = types.SpecReport{ + LeafNodeType: afterSuiteNode.NodeType, + LeafNodeLocation: afterSuiteNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + } + suite.reporter.WillRun(suite.currentSpecReport) + suite.runSuiteNode(afterSuiteNode, suite.interruptHandler.Status().Channel) + suite.processCurrentSpecReport() + } + + afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse() + if len(afterSuiteCleanup) > 0 { + for _, cleanupNode := range afterSuiteCleanup { + suite.currentSpecReport = types.SpecReport{ + LeafNodeType: cleanupNode.NodeType, + LeafNodeLocation: cleanupNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + } + suite.reporter.WillRun(suite.currentSpecReport) + suite.runSuiteNode(cleanupNode, suite.interruptHandler.Status().Channel) + suite.processCurrentSpecReport() + } + } +} + +func (suite *Suite) runReportAfterSuite() { + for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) { + suite.currentSpecReport = types.SpecReport{ + LeafNodeType: node.NodeType, + LeafNodeLocation: node.CodeLocation, + LeafNodeText: node.Text, + ParallelProcess: suite.config.ParallelProcess, + } + suite.reporter.WillRun(suite.currentSpecReport) + suite.runReportAfterSuiteNode(node, suite.report) + suite.processCurrentSpecReport() + } +} + +func (suite *Suite) 
reportEach(spec Spec, nodeType types.NodeType) { + nodes := spec.Nodes.WithType(nodeType) + if nodeType == types.NodeTypeReportAfterEach { + nodes = nodes.SortedByDescendingNestingLevel() + } + if nodeType == types.NodeTypeReportBeforeEach { + nodes = nodes.SortedByAscendingNestingLevel() + } + if len(nodes) == 0 { + return + } + + for i := range nodes { + suite.writer.Truncate() + suite.outputInterceptor.StartInterceptingOutput() + report := suite.currentSpecReport + nodes[i].Body = func() { + nodes[i].ReportEachBody(report) + } + suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS, + "{{yellow}}Ginkgo received an interrupt signal but is currently running a %s node. To avoid an invalid report the %s node will not be interrupted however subsequent tests will be skipped.{{/}}\n\n{{bold}}The running %s node is at:\n%s.{{/}}", + nodeType, nodeType, nodeType, + nodes[i].CodeLocation, + )) + state, failure := suite.runNode(nodes[i], nil, spec.Nodes.BestTextFor(nodes[i])) + suite.interruptHandler.ClearInterruptPlaceholderMessage() + // If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state. + // Also, if the reporter is every aborted - always override the state to propagate the abort + if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) { + suite.currentSpecReport.State = state + suite.currentSpecReport.Failure = failure + } + suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes()) + suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() + } +} + +func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { + if suite.config.DryRun { + suite.currentSpecReport.State = types.SpecStatePassed + return + } + + suite.writer.Truncate() + suite.outputInterceptor.StartInterceptingOutput() + suite.currentSpecReport.StartTime = time.Now() + + var err error + switch node.NodeType { + case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite: + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + case types.NodeTypeCleanupAfterSuite: + if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 { + err = suite.client.BlockUntilNonprimaryProcsHaveFinished() + } + if err == nil { + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + } + case types.NodeTypeSynchronizedBeforeSuite: + var data []byte + var runAllProcs bool + if suite.config.ParallelProcess == 1 { + if suite.config.ParallelTotal > 1 { + suite.outputInterceptor.StopInterceptingAndReturnOutput() + suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client) + } + node.Body = func() { data = node.SynchronizedBeforeSuiteProc1Body() } + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + if suite.config.ParallelTotal > 1 { + suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() + suite.outputInterceptor.StartInterceptingOutput() + if suite.currentSpecReport.State.Is(types.SpecStatePassed) { + err = suite.client.PostSynchronizedBeforeSuiteCompleted(types.SpecStatePassed, data) + } else { + err = suite.client.PostSynchronizedBeforeSuiteCompleted(suite.currentSpecReport.State, nil) + } + } + runAllProcs = 
suite.currentSpecReport.State.Is(types.SpecStatePassed) && err == nil + } else { + var proc1State types.SpecState + proc1State, data, err = suite.client.BlockUntilSynchronizedBeforeSuiteData() + switch proc1State { + case types.SpecStatePassed: + runAllProcs = true + case types.SpecStateFailed, types.SpecStatePanicked: + err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1() + case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped: + suite.currentSpecReport.State = proc1State + } + } + if runAllProcs { + node.Body = func() { node.SynchronizedBeforeSuiteAllProcsBody(data) } + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + } + case types.NodeTypeSynchronizedAfterSuite: + node.Body = node.SynchronizedAfterSuiteAllProcsBody + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + if suite.config.ParallelProcess == 1 { + if suite.config.ParallelTotal > 1 { + err = suite.client.BlockUntilNonprimaryProcsHaveFinished() + } + if err == nil { + if suite.config.ParallelTotal > 1 { + suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() + suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client) + } + + node.Body = node.SynchronizedAfterSuiteProc1Body + state, failure := suite.runNode(node, interruptChannel, "") + if suite.currentSpecReport.State.Is(types.SpecStatePassed) { + suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure + } + } + } + } + + if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { + suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error()) + } + + suite.currentSpecReport.EndTime = time.Now() + suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) + suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) + suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() + + return +} + +func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) { + suite.writer.Truncate() + suite.outputInterceptor.StartInterceptingOutput() + suite.currentSpecReport.StartTime = time.Now() + + if suite.config.ParallelTotal > 1 { + aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport() + if err != nil { + suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error()) + return + } + report = report.Add(aggregatedReport) + } + + node.Body = func() { node.ReportAfterSuiteBody(report) } + suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS, + "{{yellow}}Ginkgo received an interrupt signal but is currently running a ReportAfterSuite node. 
To avoid an invalid report the ReportAfterSuite node will not be interrupted.{{/}}\n\n{{bold}}The running ReportAfterSuite node is at:\n%s.{{/}}", + node.CodeLocation, + )) + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, nil, "") + suite.interruptHandler.ClearInterruptPlaceholderMessage() + + suite.currentSpecReport.EndTime = time.Now() + suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) + suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) + suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput() + + return +} + +func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text string) (types.SpecState, types.Failure) { + if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) { + suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node) + } + + suite.currentNode = node + defer func() { + suite.currentNode = Node{} + }() + + if suite.config.EmitSpecProgress { + if text == "" { + text = "TOP-LEVEL" + } + s := fmt.Sprintf("[%s] %s\n %s\n", node.NodeType.String(), text, node.CodeLocation.String()) + suite.writer.Write([]byte(s)) + } + + var failure types.Failure + failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation + if node.NodeType.Is(types.NodeTypeIt) || node.NodeType.Is(types.NodeTypesForSuiteLevelNodes) { + failure.FailureNodeContext = types.FailureNodeIsLeafNode + } else if node.NestingLevel <= 0 { + failure.FailureNodeContext = types.FailureNodeAtTopLevel + } else { + failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1 + } + + outcomeC := make(chan types.SpecState) + failureC := make(chan types.Failure) + + go func() { + finished := false + defer func() { + if e := recover(); e != nil || !finished { + suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e) + } + + outcome, failureFromRun := suite.failer.Drain() + outcomeC <- outcome + failureC <- failureFromRun + }() + + node.Body() + finished = true + }() + + select { + case outcome := <-outcomeC: + failureFromRun := <-failureC + if outcome == types.SpecStatePassed { + return outcome, types.Failure{} + } + failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic + return outcome, failure + case <-interruptChannel: + failure.Message, failure.Location = suite.interruptHandler.InterruptMessageWithStackTraces(), node.CodeLocation + return types.SpecStateInterrupted, failure + } +} + +func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure { + return types.Failure{ + Message: message, + Location: node.CodeLocation, + FailureNodeContext: types.FailureNodeIsLeafNode, + FailureNodeType: node.NodeType, + FailureNodeLocation: node.CodeLocation, + } +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/src/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go similarity index 56% rename from src/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go rename to src/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go index 4dcfaf4c..2f42b264 100644 --- 
a/src/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go @@ -3,40 +3,53 @@ package testingtproxy import ( "fmt" "io" + "os" + + "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/types" ) type failFunc func(message string, callerSkip ...int) type skipFunc func(message string, callerSkip ...int) -type failedFunc func() bool -type nameFunc func() string +type cleanupFunc func(args ...interface{}) +type reportFunc func() types.SpecReport -func New(writer io.Writer, fail failFunc, skip skipFunc, failed failedFunc, name nameFunc, offset int) *ginkgoTestingTProxy { +func New(writer io.Writer, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, offset int) *ginkgoTestingTProxy { return &ginkgoTestingTProxy{ - fail: fail, - offset: offset, - writer: writer, - skip: skip, - failed: failed, - name: name, + fail: fail, + offset: offset, + writer: writer, + skip: skip, + cleanup: cleanup, + report: report, } } type ginkgoTestingTProxy struct { - fail failFunc - skip skipFunc - failed failedFunc - name nameFunc - offset int - writer io.Writer + fail failFunc + skip skipFunc + cleanup cleanupFunc + report reportFunc + offset int + writer io.Writer } -func (t *ginkgoTestingTProxy) Cleanup(func()) { - // No-op +func (t *ginkgoTestingTProxy) Cleanup(f func()) { + t.cleanup(f, internal.Offset(1)) } -func (t *ginkgoTestingTProxy) Setenv(kev, value string) { - fmt.Println("Setenv is a noop for Ginkgo at the moment but will be implemented in V2") - // No-op until Cleanup is implemented +func (t *ginkgoTestingTProxy) Setenv(key, value string) { + originalValue, exists := os.LookupEnv(key) + if exists { + t.cleanup(os.Setenv, key, originalValue, internal.Offset(1)) + } else { + t.cleanup(os.Unsetenv, key, internal.Offset(1)) + } + + err := os.Setenv(key, value) + if err != nil { + t.fail(fmt.Sprintf("Failed to set environment variable: %v", err), 1) + } } func (t *ginkgoTestingTProxy) Error(args ...interface{}) { @@ -56,7 +69,7 @@ func (t *ginkgoTestingTProxy) FailNow() { } func (t *ginkgoTestingTProxy) Failed() bool { - return t.failed() + return t.report().Failed() } func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) { @@ -80,7 +93,7 @@ func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) { } func (t *ginkgoTestingTProxy) Name() string { - return t.name() + return t.report().FullText() } func (t *ginkgoTestingTProxy) Parallel() { @@ -100,10 +113,16 @@ func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) { } func (t *ginkgoTestingTProxy) Skipped() bool { - return false + return t.report().State.Is(types.SpecStateSkipped) } func (t *ginkgoTestingTProxy) TempDir() string { - // No-op - return "" + tmpDir, err := os.MkdirTemp("", "ginkgo") + if err != nil { + t.fail(fmt.Sprintf("Failed to create temporary directory: %v", err), 1) + return "" + } + t.cleanup(os.RemoveAll, tmpDir) + + return tmpDir } diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/tree.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/tree.go new file mode 100644 index 00000000..f9d1eeb8 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/tree.go @@ -0,0 +1,77 @@ +package internal + +import "github.com/onsi/ginkgo/v2/types" + +type TreeNode struct { + Node Node + Parent *TreeNode + Children TreeNodes +} + +func (tn *TreeNode) AppendChild(child *TreeNode) { + tn.Children = append(tn.Children, child) + child.Parent = tn +} + +func (tn *TreeNode) 
AncestorNodeChain() Nodes { + if tn.Parent == nil || tn.Parent.Node.IsZero() { + return Nodes{tn.Node} + } + return append(tn.Parent.AncestorNodeChain(), tn.Node) +} + +type TreeNodes []*TreeNode + +func (tn TreeNodes) Nodes() Nodes { + out := make(Nodes, len(tn)) + for i := range tn { + out[i] = tn[i].Node + } + return out +} + +func (tn TreeNodes) WithID(id uint) *TreeNode { + for i := range tn { + if tn[i].Node.ID == id { + return tn[i] + } + } + + return nil +} + +func GenerateSpecsFromTreeRoot(tree *TreeNode) Specs { + var walkTree func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs + walkTree = func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs { + tests := Specs{} + + nodes := make(Nodes, len(trees)) + for i := range trees { + nodes[i] = trees[i].Node + nodes[i].NestingLevel = nestingLevel + } + + for i := range nodes { + if !nodes[i].NodeType.Is(types.NodeTypesForContainerAndIt) { + continue + } + leftNodes, rightNodes := nodes.SplitAround(nodes[i]) + leftNodes = leftNodes.WithoutType(types.NodeTypesForContainerAndIt) + rightNodes = rightNodes.WithoutType(types.NodeTypesForContainerAndIt) + + leftNodes = lNodes.CopyAppend(leftNodes...) + rightNodes = rightNodes.CopyAppend(rNodes...) + + if nodes[i].NodeType.Is(types.NodeTypeIt) { + tests = append(tests, Spec{Nodes: leftNodes.CopyAppend(nodes[i]).CopyAppend(rightNodes...)}) + } else { + treeNode := trees.WithID(nodes[i].ID) + tests = append(tests, walkTree(nestingLevel+1, leftNodes.CopyAppend(nodes[i]), rightNodes, treeNode.Children)...) + } + } + + return tests + } + + return walkTree(0, Nodes{}, Nodes{}, tree.Children) +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/writer.go new file mode 100644 index 00000000..70f0a41f --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/writer.go @@ -0,0 +1,103 @@ +package internal + +import ( + "bytes" + "fmt" + "io" + "sync" +) + +type WriterMode uint + +const ( + WriterModeStreamAndBuffer WriterMode = iota + WriterModeBufferOnly +) + +type WriterInterface interface { + io.Writer + + Truncate() + Bytes() []byte +} + +//Writer implements WriterInterface and GinkgoWriterInterface +type Writer struct { + buffer *bytes.Buffer + outWriter io.Writer + lock *sync.Mutex + mode WriterMode + + teeWriters []io.Writer +} + +func NewWriter(outWriter io.Writer) *Writer { + return &Writer{ + buffer: &bytes.Buffer{}, + lock: &sync.Mutex{}, + outWriter: outWriter, + mode: WriterModeStreamAndBuffer, + } +} + +func (w *Writer) SetMode(mode WriterMode) { + w.lock.Lock() + defer w.lock.Unlock() + w.mode = mode +} + +func (w *Writer) Write(b []byte) (n int, err error) { + w.lock.Lock() + defer w.lock.Unlock() + + for _, teeWriter := range w.teeWriters { + teeWriter.Write(b) + } + + if w.mode == WriterModeStreamAndBuffer { + w.outWriter.Write(b) + } + return w.buffer.Write(b) +} + +func (w *Writer) Truncate() { + w.lock.Lock() + defer w.lock.Unlock() + w.buffer.Reset() +} + +func (w *Writer) Bytes() []byte { + w.lock.Lock() + defer w.lock.Unlock() + b := w.buffer.Bytes() + copied := make([]byte, len(b)) + copy(copied, b) + return copied +} + +//GinkgoWriterInterface +func (w *Writer) TeeTo(writer io.Writer) { + w.lock.Lock() + defer w.lock.Unlock() + + w.teeWriters = append(w.teeWriters, writer) +} + +func (w *Writer) ClearTeeWriters() { + w.lock.Lock() + defer w.lock.Unlock() + + w.teeWriters = []io.Writer{} +} + +func (w *Writer) Print(a ...interface{}) { + fmt.Fprint(w, a...) 
+} + +func (w *Writer) Printf(format string, a ...interface{}) { + fmt.Fprintf(w, format, a...) +} + +func (w *Writer) Println(a ...interface{}) { + fmt.Fprintln(w, a...) +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/src/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go new file mode 100644 index 00000000..f39802ff --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -0,0 +1,410 @@ +/* +Ginkgo's Default Reporter + +A number of command line flags are available to tweak Ginkgo's default output. + +These are documented [here](http://onsi.github.io/ginkgo/#running_tests) +*/ +package reporters + +import ( + "fmt" + "io" + "runtime" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type DefaultReporter struct { + conf types.ReporterConfig + writer io.Writer + + // managing the emission stream + lastChar string + lastEmissionWasDelimiter bool + + // rendering + specDenoter string + retryDenoter string + formatter formatter.Formatter +} + +func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { + reporter := NewDefaultReporter(conf, writer) + reporter.formatter = formatter.New(formatter.ColorModePassthrough) + + return reporter +} + +func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { + reporter := &DefaultReporter{ + conf: conf, + writer: writer, + + lastChar: "\n", + lastEmissionWasDelimiter: false, + + specDenoter: "•", + retryDenoter: "↺", + formatter: formatter.NewWithNoColorBool(conf.NoColor), + } + if runtime.GOOS == "windows" { + reporter.specDenoter = "+" + reporter.retryDenoter = "R" + } + + return reporter +} + +/* The Reporter Interface */ + +func (r *DefaultReporter) SuiteWillBegin(report types.Report) { + if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) { + r.emit(r.f("[%d] {{bold}}%s{{/}} ", report.SuiteConfig.RandomSeed, report.SuiteDescription)) + if len(report.SuiteLabels) > 0 { + r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", "))) + } + r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) + if report.SuiteConfig.ParallelTotal > 1 { + r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal)) + } + } else { + banner := r.f("Running Suite: %s - %s", report.SuiteDescription, report.SuitePath) + r.emitBlock(banner) + bannerWidth := len(banner) + if len(report.SuiteLabels) > 0 { + labels := strings.Join(report.SuiteLabels, ", ") + r.emitBlock(r.f("{{coral}}[%s]{{/}} ", labels)) + if len(labels)+2 > bannerWidth { + bannerWidth = len(labels) + 2 + } + } + r.emitBlock(strings.Repeat("=", bannerWidth)) + + out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed) + if report.SuiteConfig.RandomizeAllSpecs { + out += r.f(" - will randomize all specs") + } + r.emitBlock(out) + r.emit("\n") + r.emitBlock(r.f("Will run {{bold}}%d{{/}} of {{bold}}%d{{/}} specs", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) + if report.SuiteConfig.ParallelTotal > 1 { + r.emitBlock(r.f("Running in parallel across {{bold}}%d{{/}} processes", report.SuiteConfig.ParallelTotal)) + } + } +} + +func (r *DefaultReporter) WillRun(report types.SpecReport) { + if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) { + return + } + + r.emitDelimiter() + indentation := uint(0) + if
report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + r.emitBlock(r.f("{{bold}}[%s] %s{{/}}", report.LeafNodeType.String(), report.LeafNodeText)) + } else { + if len(report.ContainerHierarchyTexts) > 0 { + r.emitBlock(r.cycleJoin(report.ContainerHierarchyTexts, " ")) + indentation = 1 + } + line := r.fi(indentation, "{{bold}}%s{{/}}", report.LeafNodeText) + labels := report.Labels() + if len(labels) > 0 { + line += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels, ", ")) + } + r.emitBlock(line) + } + r.emitBlock(r.fi(indentation, "{{gray}}%s{{/}}", report.LeafNodeLocation)) +} + +func (r *DefaultReporter) DidRun(report types.SpecReport) { + v := r.conf.Verbosity() + var header, highlightColor string + includeRuntime, emitGinkgoWriterOutput, stream, denoter := true, true, false, r.specDenoter + succinctLocationBlock := v.Is(types.VerbosityLevelSuccinct) + + hasGW := report.CapturedGinkgoWriterOutput != "" + hasStd := report.CapturedStdOutErr != "" + hasEmittableReports := report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) || (report.ReportEntries.HasVisibility(types.ReportEntryVisibilityFailureOrVerbose) && (!report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose))) + + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + denoter = fmt.Sprintf("[%s]", report.LeafNodeType) + } + + switch report.State { + case types.SpecStatePassed: + highlightColor, succinctLocationBlock = "{{green}}", v.LT(types.VerbosityLevelVerbose) + emitGinkgoWriterOutput = (r.conf.AlwaysEmitGinkgoWriter || v.GTE(types.VerbosityLevelVerbose)) && hasGW + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + if v.GTE(types.VerbosityLevelVerbose) || hasStd || hasEmittableReports { + header = fmt.Sprintf("%s PASSED", denoter) + } else { + return + } + } else { + header, stream = denoter, true + if report.NumAttempts > 1 { + header, stream = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), false + } + if report.RunTime > r.conf.SlowSpecThreshold { + header, stream = fmt.Sprintf("%s [SLOW TEST]", header), false + } + } + if hasStd || emitGinkgoWriterOutput || hasEmittableReports { + stream = false + } + case types.SpecStatePending: + highlightColor = "{{yellow}}" + includeRuntime, emitGinkgoWriterOutput = false, false + if v.Is(types.VerbosityLevelSuccinct) { + header, stream = "P", true + } else { + header, succinctLocationBlock = "P [PENDING]", v.LT(types.VerbosityLevelVeryVerbose) + } + case types.SpecStateSkipped: + highlightColor = "{{cyan}}" + if report.Failure.Message != "" || v.Is(types.VerbosityLevelVeryVerbose) { + header = "S [SKIPPED]" + } else { + header, stream = "S", true + } + case types.SpecStateFailed: + highlightColor, header = "{{red}}", fmt.Sprintf("%s [FAILED]", denoter) + case types.SpecStatePanicked: + highlightColor, header = "{{magenta}}", fmt.Sprintf("%s! [PANICKED]", denoter) + case types.SpecStateInterrupted: + highlightColor, header = "{{orange}}", fmt.Sprintf("%s! [INTERRUPTED]", denoter) + case types.SpecStateAborted: + highlightColor, header = "{{coral}}", fmt.Sprintf("%s! 
[ABORTED]", denoter) + } + + // Emit stream and return + if stream { + r.emit(r.f(highlightColor + header + "{{/}}")) + return + } + + // Emit header + r.emitDelimiter() + if includeRuntime { + header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds()) + } + r.emitBlock(r.f(highlightColor + header + "{{/}}")) + + // Emit Code Location Block + r.emitBlock(r.codeLocationBlock(report, highlightColor, succinctLocationBlock, false)) + + //Emit Stdout/Stderr Output + if hasStd { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Begin Captured StdOut/StdErr Output >>{{/}}")) + r.emitBlock(r.fi(2, "%s", report.CapturedStdOutErr)) + r.emitBlock(r.fi(1, "{{gray}}<< End Captured StdOut/StdErr Output{{/}}")) + } + + //Emit Captured GinkgoWriter Output + if emitGinkgoWriterOutput && hasGW { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) + r.emitBlock(r.fi(2, "%s", report.CapturedGinkgoWriterOutput)) + r.emitBlock(r.fi(1, "{{gray}}<< End Captured GinkgoWriter Output{{/}}")) + } + + if hasEmittableReports { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Begin Report Entries >>{{/}}")) + reportEntries := report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) + if !report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose) { + reportEntries = report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose) + } + for _, entry := range reportEntries { + r.emitBlock(r.fi(2, "{{bold}}"+entry.Name+"{{gray}} - %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT))) + if representation := entry.StringRepresentation(); representation != "" { + r.emitBlock(r.fi(3, representation)) + } + } + r.emitBlock(r.fi(1, "{{gray}}<< End Report Entries{{/}}")) + } + + // Emit Failure Message + if !report.Failure.IsZero() { + r.emitBlock("\n") + r.emitBlock(r.fi(1, highlightColor+"%s{{/}}", report.Failure.Message)) + r.emitBlock(r.fi(1, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.Location)) + if report.Failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(1, highlightColor+"%s{{/}}", report.Failure.ForwardedPanic)) + } + + if r.conf.FullTrace || report.Failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(1, highlightColor+"Full Stack Trace{{/}}")) + r.emitBlock(r.fi(2, "%s", report.Failure.Location.FullStackTrace)) + } + } + + r.emitDelimiter() +} + +func (r *DefaultReporter) SuiteDidEnd(report types.Report) { + failures := report.SpecReports.WithState(types.SpecStateFailureStates) + if len(failures) > 1 { + r.emitBlock("\n\n") + r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures))) + for _, specReport := range failures { + highlightColor, heading := "{{red}}", "[FAIL]" + switch specReport.State { + case types.SpecStatePanicked: + highlightColor, heading = "{{magenta}}", "[PANICKED!]" + case types.SpecStateAborted: + highlightColor, heading = "{{coral}}", "[ABORTED]" + case types.SpecStateInterrupted: + highlightColor, heading = "{{orange}}", "[INTERRUPTED]" + } + locationBlock := r.codeLocationBlock(specReport, highlightColor, true, true) + r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock)) + } + } + + //summarize the suite + if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded { + r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime)) + return + } + + r.emitBlock("\n") + color, 
status := "{{green}}{{bold}}", "SUCCESS!" + if !report.SuiteSucceeded { + color, status = "{{red}}{{bold}}", "FAIL!" + } + + specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes + r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}", + specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates), + report.PreRunStats.TotalSpecs, + report.RunTime.Seconds()), + ) + + switch len(report.SpecialSuiteFailureReasons) { + case 0: + r.emit(r.f(color+"%s{{/}} -- ", status)) + case 1: + r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0])) + default: + r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", "))) + } + + if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 { + r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n")) + } else { + r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed))) + r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates))) + if specs.CountOfFlakedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs())) + } + r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending))) + r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped))) + } +} + +/* Emitting to the writer */ +func (r *DefaultReporter) emit(s string) { + if len(s) > 0 { + r.lastChar = s[len(s)-1:] + r.lastEmissionWasDelimiter = false + r.writer.Write([]byte(s)) + } +} + +func (r *DefaultReporter) emitBlock(s string) { + if len(s) > 0 { + if r.lastChar != "\n" { + r.emit("\n") + } + r.emit(s) + if r.lastChar != "\n" { + r.emit("\n") + } + } +} + +func (r *DefaultReporter) emitDelimiter() { + if r.lastEmissionWasDelimiter { + return + } + r.emitBlock(r.f("{{gray}}%s{{/}}", strings.Repeat("-", 30))) + r.lastEmissionWasDelimiter = true +} + +/* Rendering text */ +func (r *DefaultReporter) f(format string, args ...interface{}) string { + return r.formatter.F(format, args...) +} + +func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string { + return r.formatter.Fi(indentation, format, args...) +} + +func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string { + return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"}) +} + +func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, succinct bool, usePreciseFailureLocation bool) string { + texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{} + texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...) 
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText)) + } else { + texts = append(texts, report.LeafNodeText) + } + labels = append(labels, report.LeafNodeLabels) + locations = append(locations, report.LeafNodeLocation) + + failureLocation := report.Failure.FailureNodeLocation + if usePreciseFailureLocation { + failureLocation = report.Failure.Location + } + + switch report.Failure.FailureNodeContext { + case types.FailureNodeAtTopLevel: + texts = append([]string{r.f(highlightColor+"{{bold}}TOP-LEVEL [%s]{{/}}", report.Failure.FailureNodeType)}, texts...) + locations = append([]types.CodeLocation{failureLocation}, locations...) + labels = append([][]string{{}}, labels...) + case types.FailureNodeInContainer: + i := report.Failure.FailureNodeContainerIndex + texts[i] = r.f(highlightColor+"{{bold}}%s [%s]{{/}}", texts[i], report.Failure.FailureNodeType) + locations[i] = failureLocation + case types.FailureNodeIsLeafNode: + i := len(texts) - 1 + texts[i] = r.f(highlightColor+"{{bold}}[%s] %s{{/}}", report.LeafNodeType, report.LeafNodeText) + locations[i] = failureLocation + } + + out := "" + if succinct { + out += r.f("%s", r.cycleJoin(texts, " ")) + flattenedLabels := report.Labels() + if len(flattenedLabels) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", ")) + } + out += "\n" + if usePreciseFailureLocation { + out += r.f("{{gray}}%s{{/}}", failureLocation) + } else { + out += r.f("{{gray}}%s{{/}}", locations[len(locations)-1]) + } + } else { + for i := range texts { + out += r.fi(uint(i), "%s", texts[i]) + if len(labels[i]) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", ")) + } + out += "\n" + out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i]) + } + } + return out +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go b/src/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go new file mode 100644 index 00000000..89d30076 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go @@ -0,0 +1,149 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/v2/config" + "github.com/onsi/ginkgo/v2/types" +) + +// Deprecated: DeprecatedReporter was how Ginkgo V1 provided support for CustomReporters +// this has been removed in V2. +// Please read the documentation at: +// https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters +// for Ginkgo's new behavior and for a migration path. +type DeprecatedReporter interface { + SuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) + BeforeSuiteDidRun(setupSummary *types.SetupSummary) + SpecWillRun(specSummary *types.SpecSummary) + SpecDidComplete(specSummary *types.SpecSummary) + AfterSuiteDidRun(setupSummary *types.SetupSummary) + SuiteDidEnd(summary *types.SuiteSummary) +} + +// ReportViaDeprecatedReporter takes a V1 custom reporter and a V2 report and +// calls the custom reporter's methods with appropriately transformed data from the V2 report. +// +// ReportViaDeprecatedReporter should be called in a `ReportAfterSuite()` +// +// Deprecated: ReportViaDeprecatedReporter method exists to help developer bridge between deprecated V1 functionality and the new +// reporting support in V2. It will be removed in a future minor version of Ginkgo. 
+func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Report) { + conf := config.DeprecatedGinkgoConfigType{ + RandomSeed: report.SuiteConfig.RandomSeed, + RandomizeAllSpecs: report.SuiteConfig.RandomizeAllSpecs, + FocusStrings: report.SuiteConfig.FocusStrings, + SkipStrings: report.SuiteConfig.SkipStrings, + FailOnPending: report.SuiteConfig.FailOnPending, + FailFast: report.SuiteConfig.FailFast, + FlakeAttempts: report.SuiteConfig.FlakeAttempts, + EmitSpecProgress: report.SuiteConfig.EmitSpecProgress, + DryRun: report.SuiteConfig.DryRun, + ParallelNode: report.SuiteConfig.ParallelProcess, + ParallelTotal: report.SuiteConfig.ParallelTotal, + SyncHost: report.SuiteConfig.ParallelHost, + StreamHost: report.SuiteConfig.ParallelHost, + } + + summary := &types.DeprecatedSuiteSummary{ + SuiteDescription: report.SuiteDescription, + SuiteID: report.SuitePath, + + NumberOfSpecsBeforeParallelization: report.PreRunStats.TotalSpecs, + NumberOfTotalSpecs: report.PreRunStats.TotalSpecs, + NumberOfSpecsThatWillBeRun: report.PreRunStats.SpecsThatWillRun, + } + + reporter.SuiteWillBegin(conf, summary) + + for _, spec := range report.SpecReports { + switch spec.LeafNodeType { + case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite: + setupSummary := &types.DeprecatedSetupSummary{ + ComponentType: spec.LeafNodeType, + CodeLocation: spec.LeafNodeLocation, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.BeforeSuiteDidRun(setupSummary) + case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite: + setupSummary := &types.DeprecatedSetupSummary{ + ComponentType: spec.LeafNodeType, + CodeLocation: spec.LeafNodeLocation, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.AfterSuiteDidRun(setupSummary) + case types.NodeTypeIt: + componentTexts, componentCodeLocations := []string{}, []types.CodeLocation{} + componentTexts = append(componentTexts, spec.ContainerHierarchyTexts...) + componentCodeLocations = append(componentCodeLocations, spec.ContainerHierarchyLocations...) 
+ componentTexts = append(componentTexts, spec.LeafNodeText) + componentCodeLocations = append(componentCodeLocations, spec.LeafNodeLocation) + + specSummary := &types.DeprecatedSpecSummary{ + ComponentTexts: componentTexts, + ComponentCodeLocations: componentCodeLocations, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + NumberOfSamples: spec.NumAttempts, + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.SpecWillRun(specSummary) + reporter.SpecDidComplete(specSummary) + + switch spec.State { + case types.SpecStatePending: + summary.NumberOfPendingSpecs += 1 + case types.SpecStateSkipped: + summary.NumberOfSkippedSpecs += 1 + case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateInterrupted: + summary.NumberOfFailedSpecs += 1 + case types.SpecStatePassed: + summary.NumberOfPassedSpecs += 1 + if spec.NumAttempts > 1 { + summary.NumberOfFlakedSpecs += 1 + } + } + } + } + + summary.SuiteSucceeded = report.SuiteSucceeded + summary.RunTime = report.RunTime + + reporter.SuiteDidEnd(summary) +} + +func failureFor(spec types.SpecReport) types.DeprecatedSpecFailure { + if spec.Failure.IsZero() { + return types.DeprecatedSpecFailure{} + } + + index := 0 + switch spec.Failure.FailureNodeContext { + case types.FailureNodeInContainer: + index = spec.Failure.FailureNodeContainerIndex + case types.FailureNodeAtTopLevel: + index = -1 + case types.FailureNodeIsLeafNode: + index = len(spec.ContainerHierarchyTexts) - 1 + if spec.LeafNodeText != "" { + index += 1 + } + } + + return types.DeprecatedSpecFailure{ + Message: spec.Failure.Message, + Location: spec.Failure.Location, + ForwardedPanic: spec.Failure.ForwardedPanic, + ComponentIndex: index, + ComponentType: spec.Failure.FailureNodeType, + ComponentCodeLocation: spec.Failure.FailureNodeLocation, + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/src/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go new file mode 100644 index 00000000..7f96c450 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go @@ -0,0 +1,60 @@ +package reporters + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/onsi/ginkgo/v2/types" +) + +//GenerateJSONReport produces a JSON-formatted report at the passed in destination +func GenerateJSONReport(report types.Report, destination string) error { + f, err := os.Create(destination) + if err != nil { + return err + } + enc := json.NewEncoder(f) + enc.SetIndent("", " ") + err = enc.Encode([]types.Report{ + report, + }) + if err != nil { + return err + } + return f.Close() +} + +//MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources +//It skips over reports that fail to decode but reports on them via the returned messages []string +func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) { + messages := []string{} + allReports := []types.Report{} + for _, source := range sources { + reports := []types.Report{} + data, err := os.ReadFile(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + err = json.Unmarshal(data, &reports) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + allReports = append(allReports, reports...) 
+ } + + f, err := os.Create(destination) + if err != nil { + return messages, err + } + enc := json.NewEncoder(f) + enc.SetIndent("", " ") + err = enc.Encode(allReports) + if err != nil { + return messages, err + } + return messages, f.Close() +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/src/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go new file mode 100644 index 00000000..febcc650 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -0,0 +1,307 @@ +/* + +JUnit XML Reporter for Ginkgo + +For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output + +The schema used for the generated JUnit xml file was adapted from https://llg.cubic.org/docs/junit/ + +*/ + +package reporters + +import ( + "encoding/xml" + "fmt" + "os" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/config" + "github.com/onsi/ginkgo/v2/types" +) + +type JUnitTestSuites struct { + XMLName xml.Name `xml:"testsuites"` + // Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite) + Tests int `xml:"tests,attr"` + // Disabled maps onto specs that are pending and/or skipped + Disabled int `xml:"disabled,attr"` + // Errors maps onto specs that panicked or were interrupted + Errors int `xml:"errors,attr"` + // Failures maps onto specs that failed + Failures int `xml:"failures,attr"` + // Time is the time in seconds to execute all test suites + Time float64 `xml:"time,attr"` + + //The set of all test suites + TestSuites []JUnitTestSuite `xml:"testsuite"` +} + +type JUnitTestSuite struct { + // Name maps onto the description of the test suite - maps onto Report.SuiteDescription + Name string `xml:"name,attr"` + // Package maps onto the absolute path to the test suite - maps onto Report.SuitePath + Package string `xml:"package,attr"` + // Tests maps onto the total number of specs in the test suite (this includes any suite nodes such as BeforeSuite) + Tests int `xml:"tests,attr"` + // Disabled maps onto specs that are pending + Disabled int `xml:"disabled,attr"` + // Skiped maps onto specs that are skipped + Skipped int `xml:"skipped,attr"` + // Errors maps onto specs that panicked or were interrupted + Errors int `xml:"errors,attr"` + // Failures maps onto specs that failed + Failures int `xml:"failures,attr"` + // Time is the time in seconds to execute all the test suite - maps onto Report.RunTime + Time float64 `xml:"time,attr"` + // Timestamp is the ISO 8601 formatted start-time of the suite - maps onto Report.StartTime + Timestamp string `xml:"timestamp,attr"` + + //Properties captures the information stored in the rest of the Report type (including SuiteConfig) as key-value pairs + Properties JUnitProperties `xml:"properties"` + + //TestCases capture the individual specs + TestCases []JUnitTestCase `xml:"testcase"` +} + +type JUnitProperties struct { + Properties []JUnitProperty `xml:"property"` +} + +func (jup JUnitProperties) WithName(name string) string { + for _, property := range jup.Properties { + if property.Name == name { + return property.Value + } + } + return "" +} + +type JUnitProperty struct { + Name string `xml:"name,attr"` + Value string `xml:"value,attr"` +} + +type JUnitTestCase struct { + // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()" + Name string `xml:"name,attr"` + // Classname maps onto the name of the test suite - equivalent to Report.SuiteDescription + Classname string 
`xml:"classname,attr"` + // Status maps onto the string representation of SpecReport.State + Status string `xml:"status,attr"` + // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime + Time float64 `xml:"time,attr"` + //Skipped is populated with a message if the test was skipped or pending + Skipped *JUnitSkipped `xml:"skipped,omitempty"` + //Error is populated if the test panicked or was interrupted + Error *JUnitError `xml:"error,omitempty"` + //Failure is populated if the test failed + Failure *JUnitFailure `xml:"failure,omitempty"` + //SystemOut maps onto any captured stdout/stderr output - maps onto SpecReport.CapturedStdOutErr + SystemOut string `xml:"system-out,omitempty"` + //SystemOut maps onto any captured GinkgoWriter output - maps onto SpecReport.CapturedGinkgoWriterOutput + SystemErr string `xml:"system-err,omitempty"` +} + +type JUnitSkipped struct { + // Message maps onto "pending" if the test was marked pending, "skipped" if the test was marked skipped, and "skipped - REASON" if the user called Skip(REASON) + Message string `xml:"message,attr"` +} + +type JUnitError struct { + //Message maps onto the panic/exception thrown - equivalent to SpecReport.Failure.ForwardedPanic - or to "interrupted" + Message string `xml:"message,attr"` + //Type is one of "panicked" or "interrupted" + Type string `xml:"type,attr"` + //Description maps onto the captured stack trace for a panic, or the failure message for an interrupt which will include the dump of running goroutines + Description string `xml:",chardata"` +} + +type JUnitFailure struct { + //Message maps onto the failure message - equivalent to SpecReport.Failure.Message + Message string `xml:"message,attr"` + //Type is "failed" + Type string `xml:"type,attr"` + //Description maps onto the location and stack trace of the failure + Description string `xml:",chardata"` +} + +func GenerateJUnitReport(report types.Report, dst string) error { + suite := JUnitTestSuite{ + Name: report.SuiteDescription, + Package: report.SuitePath, + Time: report.RunTime.Seconds(), + Timestamp: report.StartTime.Format("2006-01-02T15:04:05"), + Properties: JUnitProperties{ + Properties: []JUnitProperty{ + {"SuiteSucceeded", fmt.Sprintf("%t", report.SuiteSucceeded)}, + {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)}, + {"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")}, + {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))}, + {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)}, + {"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)}, + {"LabelFilter", report.SuiteConfig.LabelFilter}, + {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")}, + {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")}, + {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, + {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")}, + {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, + {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, + {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, + {"EmitSpecProgress", fmt.Sprintf("%t", report.SuiteConfig.EmitSpecProgress)}, + {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, + {"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)}, + {"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode}, + }, + }, + } + for _, spec := range report.SpecReports { + 
name := fmt.Sprintf("[%s]", spec.LeafNodeType) + if spec.FullText() != "" { + name = name + " " + spec.FullText() + } + labels := spec.Labels() + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + + test := JUnitTestCase{ + Name: name, + Classname: report.SuiteDescription, + Status: spec.State.String(), + Time: spec.RunTime.Seconds(), + SystemOut: systemOutForUnstructureReporters(spec), + SystemErr: spec.CapturedGinkgoWriterOutput, + } + suite.Tests += 1 + + switch spec.State { + case types.SpecStateSkipped: + message := "skipped" + if spec.Failure.Message != "" { + message += " - " + spec.Failure.Message + } + test.Skipped = &JUnitSkipped{Message: message} + suite.Skipped += 1 + case types.SpecStatePending: + test.Skipped = &JUnitSkipped{Message: "pending"} + suite.Disabled += 1 + case types.SpecStateFailed: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "failed", + Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + } + suite.Failures += 1 + case types.SpecStateInterrupted: + test.Error = &JUnitError{ + Message: "interrupted", + Type: "interrupted", + Description: spec.Failure.Message, + } + suite.Errors += 1 + case types.SpecStateAborted: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "aborted", + Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + } + suite.Errors += 1 + case types.SpecStatePanicked: + test.Error = &JUnitError{ + Message: spec.Failure.ForwardedPanic, + Type: "panicked", + Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + } + suite.Errors += 1 + } + + suite.TestCases = append(suite.TestCases, test) + } + + junitReport := JUnitTestSuites{ + Tests: suite.Tests, + Disabled: suite.Disabled + suite.Skipped, + Errors: suite.Errors, + Failures: suite.Failures, + Time: suite.Time, + TestSuites: []JUnitTestSuite{suite}, + } + + f, err := os.Create(dst) + if err != nil { + return err + } + f.WriteString(xml.Header) + encoder := xml.NewEncoder(f) + encoder.Indent(" ", " ") + encoder.Encode(junitReport) + + return f.Close() +} + +func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) { + messages := []string{} + mergedReport := JUnitTestSuites{} + for _, source := range sources { + report := JUnitTestSuites{} + f, err := os.Open(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + err = xml.NewDecoder(f).Decode(&report) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + + mergedReport.Tests += report.Tests + mergedReport.Disabled += report.Disabled + mergedReport.Errors += report.Errors + mergedReport.Failures += report.Failures + mergedReport.Time += report.Time + mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...) 
+ } + + f, err := os.Create(dst) + if err != nil { + return messages, err + } + f.WriteString(xml.Header) + encoder := xml.NewEncoder(f) + encoder.Indent(" ", " ") + encoder.Encode(mergedReport) + + return messages, f.Close() +} + +func systemOutForUnstructureReporters(spec types.SpecReport) string { + systemOut := spec.CapturedStdOutErr + if len(spec.ReportEntries) > 0 { + systemOut += "\nReport Entries:\n" + for i, entry := range spec.ReportEntries { + systemOut += fmt.Sprintf("%s\n%s\n%s\n", entry.Name, entry.Location, entry.Time.Format(time.RFC3339Nano)) + if representation := entry.StringRepresentation(); representation != "" { + systemOut += representation + "\n" + } + if i+1 < len(spec.ReportEntries) { + systemOut += "--\n" + } + } + } + return systemOut +} + +// Deprecated JUnitReporter (so folks can still compile their suites) +type JUnitReporter struct{} + +func NewJUnitReporter(_ string) *JUnitReporter { return &JUnitReporter{} } +func (reporter *JUnitReporter) SuiteWillBegin(_ config.GinkgoConfigType, _ *types.SuiteSummary) {} +func (reporter *JUnitReporter) BeforeSuiteDidRun(_ *types.SetupSummary) {} +func (reporter *JUnitReporter) SpecWillRun(_ *types.SpecSummary) {} +func (reporter *JUnitReporter) SpecDidComplete(_ *types.SpecSummary) {} +func (reporter *JUnitReporter) AfterSuiteDidRun(_ *types.SetupSummary) {} +func (reporter *JUnitReporter) SuiteDidEnd(_ *types.SuiteSummary) {} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go b/src/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go new file mode 100644 index 00000000..29f84e7c --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go @@ -0,0 +1,19 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/v2/types" +) + +type Reporter interface { + SuiteWillBegin(report types.Report) + WillRun(report types.SpecReport) + DidRun(report types.SpecReport) + SuiteDidEnd(report types.Report) +} + +type NoopReporter struct{} + +func (n NoopReporter) SuiteWillBegin(report types.Report) {} +func (n NoopReporter) WillRun(report types.SpecReport) {} +func (n NoopReporter) DidRun(report types.SpecReport) {} +func (n NoopReporter) SuiteDidEnd(report types.Report) {} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/src/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go new file mode 100644 index 00000000..2aa2f184 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -0,0 +1,97 @@ +/* + +TeamCity Reporter for Ginkgo + +Makes use of TeamCity's support for Service Messages +http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests +*/ + +package reporters + +import ( + "fmt" + "os" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +func tcEscape(s string) string { + s = strings.Replace(s, "|", "||", -1) + s = strings.Replace(s, "'", "|'", -1) + s = strings.Replace(s, "\n", "|n", -1) + s = strings.Replace(s, "\r", "|r", -1) + s = strings.Replace(s, "[", "|[", -1) + s = strings.Replace(s, "]", "|]", -1) + return s +} + +func GenerateTeamcityReport(report types.Report, dst string) error { + f, err := os.Create(dst) + if err != nil { + return err + } + + name := report.SuiteDescription + labels := report.SuiteLabels + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name)) + for _, spec := range report.SpecReports { + name := 
fmt.Sprintf("[%s]", spec.LeafNodeType) + if spec.FullText() != "" { + name = name + " " + spec.FullText() + } + labels := spec.Labels() + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + + name = tcEscape(name) + fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name) + switch spec.State { + case types.SpecStatePending: + fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='pending']\n", name) + case types.SpecStateSkipped: + message := "skipped" + if spec.Failure.Message != "" { + message += " - " + spec.Failure.Message + } + fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='%s']\n", name, tcEscape(message)) + case types.SpecStateFailed: + details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='failed - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + case types.SpecStatePanicked: + details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='panicked - %s' details='%s']\n", name, tcEscape(spec.Failure.ForwardedPanic), tcEscape(details)) + case types.SpecStateInterrupted: + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted' details='%s']\n", name, tcEscape(spec.Failure.Message)) + case types.SpecStateAborted: + details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='aborted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + } + + fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructureReporters(spec))) + fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(spec.CapturedGinkgoWriterOutput)) + fmt.Fprintf(f, "##teamcity[testFinished name='%s' duration='%d']\n", name, int(spec.RunTime.Seconds()*1000.0)) + } + fmt.Fprintf(f, "##teamcity[testSuiteFinished name='%s']\n", tcEscape(report.SuiteDescription)) + + return f.Close() +} + +func MergeAndCleanupTeamcityReports(sources []string, dst string) ([]string, error) { + messages := []string{} + merged := []byte{} + for _, source := range sources { + data, err := os.ReadFile(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + merged = append(merged, data...) + } + return messages, os.WriteFile(dst, merged, 0666) +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/src/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go new file mode 100644 index 00000000..07b4c612 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -0,0 +1,153 @@ +package ginkgo + +import ( + "fmt" + "strings" + + "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/internal/global" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +/* +Report represents the report for a Suite. +It is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#Report +*/ +type Report = types.Report + +/* +Report represents the report for a Spec. +It is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport +*/ +type SpecReport = types.SpecReport + +/* +CurrentSpecReport returns information about the current running spec. 
+The returned object is a types.SpecReport which includes helper methods +to make extracting information about the spec easier. + +You can learn more about SpecReport here: https://pkg.go.dev/github.com/onsi/ginkgo/types#SpecReport +You can learn more about CurrentSpecReport() here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec +*/ +func CurrentSpecReport() SpecReport { + return global.Suite.CurrentSpecReport() +} + +/* + ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter + +- ReportEntryVisibilityAlways: the default behavior - the ReportEntry is always emitted. +- ReportEntryVisibilityFailureOrVerbose: the ReportEntry is only emitted if the spec fails or if the tests are run with -v (similar to GinkgoWriters behavior). +- ReportEntryVisibilityNever: the ReportEntry is never emitted though it appears in any generated machine-readable reports (e.g. by setting `--json-report`). + +You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports +*/ +type ReportEntryVisibility = types.ReportEntryVisibility + +const ReportEntryVisibilityAlways, ReportEntryVisibilityFailureOrVerbose, ReportEntryVisibilityNever = types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose, types.ReportEntryVisibilityNever + +/* +AddReportEntry generates and adds a new ReportEntry to the current spec's SpecReport. +It can take any of the following arguments: + - A single arbitrary object to attach as the Value of the ReportEntry. This object will be included in any generated reports and will be emitted to the console when the report is emitted. + - A ReportEntryVisibility enum to control the visibility of the ReportEntry + - An Offset or CodeLocation decoration to control the reported location of the ReportEntry + +If the Value object implements `fmt.Stringer`, it's `String()` representation is used when emitting to the console. + +AddReportEntry() must be called within a Subject or Setup node - not in a Container node. + +You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports +*/ +func AddReportEntry(name string, args ...interface{}) { + cl := types.NewCodeLocation(1) + reportEntry, err := internal.NewReportEntry(name, cl, args...) + if err != nil { + Fail(fmt.Sprintf("Failed to generate Report Entry:\n%s", err.Error()), 1) + } + err = global.Suite.AddReportEntry(reportEntry) + if err != nil { + Fail(fmt.Sprintf("Failed to add Report Entry:\n%s", err.Error()), 1) + } +} + +/* +ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that +receives a SpecReport. They are called before the spec starts. + +You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure. +You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically +*/ +func ReportBeforeEach(body func(SpecReport)) bool { + return pushNode(internal.NewReportBeforeEachNode(body, types.NewCodeLocation(1))) +} + +/* +ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. ReportAfterEach nodes take a function that +receives a SpecReport. They are called after the spec has completed and receive the final report for the spec. + +You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure. 
+You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically +*/ +func ReportAfterEach(body func(SpecReport)) bool { + return pushNode(internal.NewReportAfterEachNode(body, types.NewCodeLocation(1))) +} + +/* +ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report. + +They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite. +ReportAftersuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) + +When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across +all parallel nodes + +In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, JUnit, and Teamcity formatted reports using the --json-report, --junit-report, and --teamcity-report ginkgo CLI flags. + +You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. +You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically +You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports +*/ +func ReportAfterSuite(text string, body func(Report)) bool { + return pushNode(internal.NewReportAfterSuiteNode(text, body, types.NewCodeLocation(1))) +} + +func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.ReporterConfig) { + body := func(report Report) { + if reporterConfig.JSONReport != "" { + err := reporters.GenerateJSONReport(report, reporterConfig.JSONReport) + if err != nil { + Fail(fmt.Sprintf("Failed to generate JSON report:\n%s", err.Error())) + } + } + if reporterConfig.JUnitReport != "" { + err := reporters.GenerateJUnitReport(report, reporterConfig.JUnitReport) + if err != nil { + Fail(fmt.Sprintf("Failed to generate JUnit report:\n%s", err.Error())) + } + } + if reporterConfig.TeamcityReport != "" { + err := reporters.GenerateTeamcityReport(report, reporterConfig.TeamcityReport) + if err != nil { + Fail(fmt.Sprintf("Failed to generate Teamcity report:\n%s", err.Error())) + } + } + } + + flags := []string{} + if reporterConfig.JSONReport != "" { + flags = append(flags, "--json-report") + } + if reporterConfig.JUnitReport != "" { + flags = append(flags, "--junit-report") + } + if reporterConfig.TeamcityReport != "" { + flags = append(flags, "--teamcity-report") + } + pushNode(internal.NewReportAfterSuiteNode( + fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")), + body, + types.NewCustomCodeLocation("autogenerated by Ginkgo"), + )) +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/src/vendor/github.com/onsi/ginkgo/v2/table_dsl.go new file mode 100644 index 00000000..0db0e718 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -0,0 +1,265 @@ +package ginkgo + +import ( + "fmt" + "reflect" + "strings" + + "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/types" +) + +/* +The EntryDescription decorator allows you to pass a format string to DescribeTable() and Entry(). This format string is used to generate entry names via: + + fmt.Sprintf(formatString, parameters...) + +where parameters are the parameters passed into the entry. 
+ +When passed into an Entry the EntryDescription is used to generate the name or that entry. When passed to DescribeTable, the EntryDescription is used to generate the names for any entries that have `nil` descriptions. + +You can learn more about generating EntryDescriptions here: https://onsi.github.io/ginkgo/#generating-entry-descriptions +*/ +type EntryDescription string + +func (ed EntryDescription) render(args ...interface{}) string { + return fmt.Sprintf(string(ed), args...) +} + +/* +DescribeTable describes a table-driven spec. + +For example: + + DescribeTable("a simple table", + func(x int, y int, expected bool) { + Ξ©(x > y).Should(Equal(expected)) + }, + Entry("x > y", 1, 0, true), + Entry("x == y", 0, 0, false), + Entry("x < y", 0, 1, false), + ) + +You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs +And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns +*/ +func DescribeTable(description string, args ...interface{}) bool { + generateTable(description, args...) + return true +} + +/* +You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`. +*/ +func FDescribeTable(description string, args ...interface{}) bool { + args = append(args, internal.Focus) + generateTable(description, args...) + return true +} + +/* +You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`. +*/ +func PDescribeTable(description string, args ...interface{}) bool { + args = append(args, internal.Pending) + generateTable(description, args...) + return true +} + +/* +You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`. +*/ +var XDescribeTable = PDescribeTable + +/* +TableEntry represents an entry in a table test. You generally use the `Entry` constructor. +*/ +type TableEntry struct { + description interface{} + decorations []interface{} + parameters []interface{} + codeLocation types.CodeLocation +} + +/* +Entry constructs a TableEntry. + +The first argument is a description. This can be a string, a function that accepts the parameters passed to the TableEntry and returns a string, an EntryDescription format string, or nil. If nil is provided then the name of the Entry is derived using the table-level entry description. +Subsequent arguments accept any Ginkgo decorators. These are filtered out and the remaining arguments are passed into the Spec function associated with the table. + +Each Entry ends up generating an individual Ginkgo It. The body of the it is the Table Body function with the Entry parameters passed in. + +You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs +*/ +func Entry(description interface{}, args ...interface{}) TableEntry { + decorations, parameters := internal.PartitionDecorations(args...) + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} +} + +/* +You can focus a particular entry with FEntry. This is equivalent to FIt. +*/ +func FEntry(description interface{}, args ...interface{}) TableEntry { + decorations, parameters := internal.PartitionDecorations(args...) + decorations = append(decorations, internal.Focus) + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} +} + +/* +You can mark a particular entry as pending with PEntry. This is equivalent to PIt. 
+*/ +func PEntry(description interface{}, args ...interface{}) TableEntry { + decorations, parameters := internal.PartitionDecorations(args...) + decorations = append(decorations, internal.Pending) + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} +} + +/* +You can mark a particular entry as pending with XEntry. This is equivalent to XIt. +*/ +var XEntry = PEntry + +func generateTable(description string, args ...interface{}) { + cl := types.NewCodeLocation(2) + containerNodeArgs := []interface{}{cl} + + entries := []TableEntry{} + var itBody interface{} + + var tableLevelEntryDescription interface{} + tableLevelEntryDescription = func(args ...interface{}) string { + out := []string{} + for _, arg := range args { + out = append(out, fmt.Sprint(arg)) + } + return "Entry: " + strings.Join(out, ", ") + } + + for i, arg := range args { + switch t := reflect.TypeOf(arg); { + case t == nil: + exitIfErr(types.GinkgoErrors.IncorrectParameterTypeForTable(i, "nil", cl)) + case t == reflect.TypeOf(TableEntry{}): + entries = append(entries, arg.(TableEntry)) + case t == reflect.TypeOf([]TableEntry{}): + entries = append(entries, arg.([]TableEntry)...) + case t == reflect.TypeOf(EntryDescription("")): + tableLevelEntryDescription = arg.(EntryDescription).render + case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""): + tableLevelEntryDescription = arg + case t.Kind() == reflect.Func: + if itBody != nil { + exitIfErr(types.GinkgoErrors.MultipleEntryBodyFunctionsForTable(cl)) + } + itBody = arg + default: + containerNodeArgs = append(containerNodeArgs, arg) + } + } + + containerNodeArgs = append(containerNodeArgs, func() { + for _, entry := range entries { + var err error + entry := entry + var description string + switch t := reflect.TypeOf(entry.description); { + case t == nil: + err = validateParameters(tableLevelEntryDescription, entry.parameters, "Entry Description function", entry.codeLocation) + if err == nil { + description = invokeFunction(tableLevelEntryDescription, entry.parameters)[0].String() + } + case t == reflect.TypeOf(EntryDescription("")): + description = entry.description.(EntryDescription).render(entry.parameters...) + case t == reflect.TypeOf(""): + description = entry.description.(string) + case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""): + err = validateParameters(entry.description, entry.parameters, "Entry Description function", entry.codeLocation) + if err == nil { + description = invokeFunction(entry.description, entry.parameters)[0].String() + } + default: + err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation) + } + + if err == nil { + err = validateParameters(itBody, entry.parameters, "Table Body function", entry.codeLocation) + } + itNodeArgs := []interface{}{entry.codeLocation} + itNodeArgs = append(itNodeArgs, entry.decorations...) 
+ itNodeArgs = append(itNodeArgs, func() { + if err != nil { + panic(err) + } + invokeFunction(itBody, entry.parameters) + }) + + pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, description, itNodeArgs...)) + } + }) + + pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...)) +} + +func invokeFunction(function interface{}, parameters []interface{}) []reflect.Value { + inValues := make([]reflect.Value, len(parameters)) + + funcType := reflect.TypeOf(function) + limit := funcType.NumIn() + if funcType.IsVariadic() { + limit = limit - 1 + } + + for i := 0; i < limit && i < len(parameters); i++ { + inValues[i] = computeValue(parameters[i], funcType.In(i)) + } + + if funcType.IsVariadic() { + variadicType := funcType.In(limit).Elem() + for i := limit; i < len(parameters); i++ { + inValues[i] = computeValue(parameters[i], variadicType) + } + } + + return reflect.ValueOf(function).Call(inValues) +} + +func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation) error { + funcType := reflect.TypeOf(function) + limit := funcType.NumIn() + if funcType.IsVariadic() { + limit = limit - 1 + } + if len(parameters) < limit { + return types.GinkgoErrors.TooFewParametersToTableFunction(limit, len(parameters), kind, cl) + } + if len(parameters) > limit && !funcType.IsVariadic() { + return types.GinkgoErrors.TooManyParametersToTableFunction(limit, len(parameters), kind, cl) + } + var i = 0 + for ; i < limit; i++ { + actual := reflect.TypeOf(parameters[i]) + expected := funcType.In(i) + if !(actual == nil) && !actual.AssignableTo(expected) { + return types.GinkgoErrors.IncorrectParameterTypeToTableFunction(i+1, expected, actual, kind, cl) + } + } + if funcType.IsVariadic() { + expected := funcType.In(limit).Elem() + for ; i < len(parameters); i++ { + actual := reflect.TypeOf(parameters[i]) + if !(actual == nil) && !actual.AssignableTo(expected) { + return types.GinkgoErrors.IncorrectVariadicParameterTypeToTableFunction(expected, actual, kind, cl) + } + } + } + + return nil +} + +func computeValue(parameter interface{}, t reflect.Type) reflect.Value { + if parameter == nil { + return reflect.Zero(t) + } else { + return reflect.ValueOf(parameter) + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/src/vendor/github.com/onsi/ginkgo/v2/types/code_location.go new file mode 100644 index 00000000..00107d3a --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/types/code_location.go @@ -0,0 +1,92 @@ +package types + +import ( + "fmt" + "os" + "regexp" + "runtime" + "runtime/debug" + "strings" +) + +type CodeLocation struct { + FileName string `json:",omitempty"` + LineNumber int `json:",omitempty"` + FullStackTrace string `json:",omitempty"` + CustomMessage string `json:",omitempty"` +} + +func (codeLocation CodeLocation) String() string { + if codeLocation.CustomMessage != "" { + return codeLocation.CustomMessage + } + return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber) +} + +func (codeLocation CodeLocation) ContentsOfLine() string { + if codeLocation.CustomMessage != "" { + return "" + } + contents, err := os.ReadFile(codeLocation.FileName) + if err != nil { + return "" + } + lines := strings.Split(string(contents), "\n") + if len(lines) < codeLocation.LineNumber { + return "" + } + return lines[codeLocation.LineNumber-1] +} + +func NewCustomCodeLocation(message string) CodeLocation { + return CodeLocation{ + CustomMessage: message, + } +} + +func 
NewCodeLocation(skip int) CodeLocation { + _, file, line, _ := runtime.Caller(skip + 1) + return CodeLocation{FileName: file, LineNumber: line} +} + +func NewCodeLocationWithStackTrace(skip int) CodeLocation { + _, file, line, _ := runtime.Caller(skip + 1) + stackTrace := PruneStack(string(debug.Stack()), skip+1) + return CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace} +} + +// PruneStack removes references to functions that are internal to Ginkgo +// and the Go runtime from a stack string and a certain number of stack entries +// at the beginning of the stack. The stack string has the format +// as returned by runtime/debug.Stack. The leading goroutine information is +// optional and always removed if present. Beware that runtime/debug.Stack +// adds itself as first entry, so typically skip must be >= 1 to remove that +// entry. +func PruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n") + // Ensure that the even entries are the method names and the + // the odd entries the source code information. + if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") { + // Ignore "goroutine 29 [running]:" line. + stack = stack[1:] + } + // The "+1" is for skipping over the initial entry, which is + // runtime/debug.Stack() itself. + if len(stack) > 2*(skip+1) { + stack = stack[2*(skip+1):] + } + prunedStack := []string{} + if os.Getenv("GINKGO_PRUNE_STACK") == "FALSE" { + prunedStack = stack + } else { + re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + for i := 0; i < len(stack)/2; i++ { + // We filter out based on the source code file name. + if !re.Match([]byte(stack[i*2+1])) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/types/config.go b/src/vendor/github.com/onsi/ginkgo/v2/types/config.go new file mode 100644 index 00000000..07ef4c3a --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -0,0 +1,723 @@ +/* +Ginkgo accepts a number of configuration options. 
+These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli) +*/ + +package types + +import ( + "flag" + "os" + "runtime" + "strconv" + "strings" + "time" +) + +// Configuration controlling how an individual test suite is run +type SuiteConfig struct { + RandomSeed int64 + RandomizeAllSpecs bool + FocusStrings []string + SkipStrings []string + FocusFiles []string + SkipFiles []string + LabelFilter string + FailOnPending bool + FailFast bool + FlakeAttempts int + EmitSpecProgress bool + DryRun bool + Timeout time.Duration + OutputInterceptorMode string + + ParallelProcess int + ParallelTotal int + ParallelHost string +} + +func NewDefaultSuiteConfig() SuiteConfig { + return SuiteConfig{ + RandomSeed: time.Now().Unix(), + Timeout: time.Hour, + ParallelProcess: 1, + ParallelTotal: 1, + } +} + +type VerbosityLevel uint + +const ( + VerbosityLevelSuccinct VerbosityLevel = iota + VerbosityLevelNormal + VerbosityLevelVerbose + VerbosityLevelVeryVerbose +) + +func (vl VerbosityLevel) GT(comp VerbosityLevel) bool { + return vl > comp +} + +func (vl VerbosityLevel) GTE(comp VerbosityLevel) bool { + return vl >= comp +} + +func (vl VerbosityLevel) Is(comp VerbosityLevel) bool { + return vl == comp +} + +func (vl VerbosityLevel) LTE(comp VerbosityLevel) bool { + return vl <= comp +} + +func (vl VerbosityLevel) LT(comp VerbosityLevel) bool { + return vl < comp +} + +// Configuration for Ginkgo's reporter +type ReporterConfig struct { + NoColor bool + SlowSpecThreshold time.Duration + Succinct bool + Verbose bool + VeryVerbose bool + FullTrace bool + AlwaysEmitGinkgoWriter bool + + JSONReport string + JUnitReport string + TeamcityReport string +} + +func (rc ReporterConfig) Verbosity() VerbosityLevel { + if rc.Succinct { + return VerbosityLevelSuccinct + } else if rc.Verbose { + return VerbosityLevelVerbose + } else if rc.VeryVerbose { + return VerbosityLevelVeryVerbose + } + return VerbosityLevelNormal +} + +func (rc ReporterConfig) WillGenerateReport() bool { + return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != "" +} + +func NewDefaultReporterConfig() ReporterConfig { + return ReporterConfig{ + SlowSpecThreshold: 5 * time.Second, + } +} + +// Configuration for the Ginkgo CLI +type CLIConfig struct { + //for build, run, and watch + Recurse bool + SkipPackage string + RequireSuite bool + NumCompilers int + + //for run and watch only + Procs int + Parallel bool + AfterRunHook string + OutputDir string + KeepSeparateCoverprofiles bool + KeepSeparateReports bool + + //for run only + KeepGoing bool + UntilItFails bool + Repeat int + RandomizeSuites bool + + //for watch only + Depth int + WatchRegExp string +} + +func NewDefaultCLIConfig() CLIConfig { + return CLIConfig{ + Depth: 1, + WatchRegExp: `\.go$`, + } +} + +func (g CLIConfig) ComputedProcs() int { + if g.Procs > 0 { + return g.Procs + } + + n := 1 + if g.Parallel { + n = runtime.NumCPU() + if n > 4 { + n = n - 1 + } + } + return n +} + +func (g CLIConfig) ComputedNumCompilers() int { + if g.NumCompilers > 0 { + return g.NumCompilers + } + + return runtime.NumCPU() +} + +// Configuration for the Ginkgo CLI capturing available go flags +// A subset of Go flags are exposed by Ginkgo. Some are available at compile time (e.g. ginkgo build) and others only at run time (e.g. ginkgo run - which has both build and run time flags). 
+// More details can be found at: +// https://docs.google.com/spreadsheets/d/1zkp-DS4hU4sAJl5eHh1UmgwxCPQhf3s5a8fbiOI8tJU/ +type GoFlagsConfig struct { + //build-time flags for code-and-performance analysis + Race bool + Cover bool + CoverMode string + CoverPkg string + Vet string + + //run-time flags for code-and-performance analysis + BlockProfile string + BlockProfileRate int + CoverProfile string + CPUProfile string + MemProfile string + MemProfileRate int + MutexProfile string + MutexProfileFraction int + Trace string + + //build-time flags for building + A bool + ASMFlags string + BuildMode string + Compiler string + GCCGoFlags string + GCFlags string + InstallSuffix string + LDFlags string + LinkShared bool + Mod string + N bool + ModFile string + ModCacheRW bool + MSan bool + PkgDir string + Tags string + TrimPath bool + ToolExec string + Work bool + X bool +} + +func NewDefaultGoFlagsConfig() GoFlagsConfig { + return GoFlagsConfig{} +} + +func (g GoFlagsConfig) BinaryMustBePreserved() bool { + return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != "" +} + +// Configuration that were deprecated in 2.0 +type deprecatedConfig struct { + DebugParallel bool + NoisySkippings bool + NoisyPendings bool + RegexScansFilePath bool + SlowSpecThresholdWithFLoatUnits float64 + Stream bool + Notify bool +} + +// Flags + +// Flags sections used by both the CLI and the Ginkgo test process +var FlagSections = GinkgoFlagSections{ + {Key: "multiple-suites", Style: "{{dark-green}}", Heading: "Running Multiple Test Suites"}, + {Key: "order", Style: "{{green}}", Heading: "Controlling Test Order"}, + {Key: "parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism"}, + {Key: "low-level-parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism", + Description: "These are set by the Ginkgo CLI, {{red}}{{bold}}do not set them manually{{/}} via go test.\nUse ginkgo -p or ginkgo -procs=N instead."}, + {Key: "filter", Style: "{{cyan}}", Heading: "Filtering Tests"}, + {Key: "failure", Style: "{{red}}", Heading: "Failure Handling"}, + {Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"}, + {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"}, + {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"}, + {Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests", + Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."}, + {Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"}, + {Key: "misc", Style: "{{light-gray}}", Heading: "Miscellaneous"}, + {Key: "go-build", Style: "{{light-gray}}", Heading: "Go Build Flags", Succinct: true, + Description: "These flags are inherited from go build. 
Run {{bold}}ginkgo help build{{/}} for more detailed flag documentation."}, +} + +// SuiteConfigFlags provides flags for the Ginkgo test process, and CLI +var SuiteConfigFlags = GinkgoFlags{ + {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", + Usage: "The seed used to randomize the spec suite."}, + {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."}, + + {KeyPath: "S.FailOnPending", Name: "fail-on-pending", SectionKey: "failure", DeprecatedName: "failOnPending", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will mark the test suite as failed if any specs are pending."}, + {KeyPath: "S.FailFast", Name: "fail-fast", SectionKey: "failure", DeprecatedName: "failFast", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, + {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", + Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."}, + + {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, + {KeyPath: "S.EmitSpecProgress", Name: "progress", SectionKey: "debug", + Usage: "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter."}, + {KeyPath: "S.Timeout", Name: "timeout", SectionKey: "debug", UsageDefaultValue: "1h", + Usage: "Test suite fails if it does not complete within the specified timeout."}, + {KeyPath: "S.OutputInterceptorMode", Name: "output-interceptor-mode", SectionKey: "debug", UsageArgument: "dup, swap, or none", + Usage: "If set, ginkgo will use the specified output interception strategy when running in parallel. Defaults to dup on unix and swap on windows."}, + + {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression", + Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"}, + {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter", + Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter", + Usage: "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.FocusFiles", Name: "focus-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line", + Usage: "If set, ginkgo will only run specs in matching files. 
Can be specified multiple times, values are ORed."}, + {KeyPath: "S.SkipFiles", Name: "skip-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line", + Usage: "If set, ginkgo will skip specs in matching files. Can be specified multiple times, values are ORed."}, + + {KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"}, +} + +// ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI) +var ParallelConfigFlags = GinkgoFlags{ + {KeyPath: "S.ParallelProcess", Name: "parallel.process", SectionKey: "low-level-parallel", UsageDefaultValue: "1", + Usage: "This worker process's (one-indexed) process number. For running specs in parallel."}, + {KeyPath: "S.ParallelTotal", Name: "parallel.total", SectionKey: "low-level-parallel", UsageDefaultValue: "1", + Usage: "The total number of worker processes. For running specs in parallel."}, + {KeyPath: "S.ParallelHost", Name: "parallel.host", SectionKey: "low-level-parallel", UsageDefaultValue: "set by Ginkgo CLI", + Usage: "The address for the server that will synchronize the processes."}, +} + +// ReporterConfigFlags provides flags for the Ginkgo test process, and CLI +var ReporterConfigFlags = GinkgoFlags{ + {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, suppress color output in default reporter."}, + {KeyPath: "R.SlowSpecThreshold", Name: "slow-spec-threshold", SectionKey: "output", UsageArgument: "duration", UsageDefaultValue: "5s", + Usage: "Specs that take longer to run than this threshold are flagged as slow by the default reporter."}, + {KeyPath: "R.Verbose", Name: "v", SectionKey: "output", + Usage: "If set, emits more output including GinkgoWriter contents."}, + {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output", + Usage: "If set, emits with maximal verbosity - includes skipped and pending tests."}, + {KeyPath: "R.Succinct", Name: "succinct", SectionKey: "output", + Usage: "If set, default reporter prints out a very succinct report"}, + {KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output", + Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, + {KeyPath: "R.AlwaysEmitGinkgoWriter", Name: "always-emit-ginkgo-writer", SectionKey: "output", DeprecatedName: "reportPassed", DeprecatedDocLink: "renamed--reportpassed", + Usage: "If set, default reporter prints out captured output of passed tests."}, + + {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", + Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, + {KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure", + Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."}, + {KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output", + Usage: "If set, Ginkgo will generate a Teamcity-formatted test report at the specified location."}, + + {KeyPath: "D.SlowSpecThresholdWithFLoatUnits", DeprecatedName: "slowSpecThreshold", DeprecatedDocLink: 
"changed--slowspecthreshold", + Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"}, + {KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, +} + +// BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process +func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...) + flags = flags.WithPrefix("ginkgo") + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "D": &deprecatedConfig{}, + } + extraGoFlagsSection := GinkgoFlagSection{Style: "{{gray}}", Heading: "Go test flags"} + + return NewAttachedGinkgoFlagSet(flag.CommandLine, flags, bindings, FlagSections, extraGoFlagsSection) +} + +// VetConfig validates that the Ginkgo test process' configuration is sound +func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig ReporterConfig) []error { + errors := []error{} + + if flagSet.WasSet("count") || flagSet.WasSet("test.count") { + flag := flagSet.Lookup("count") + if flag == nil { + flag = flagSet.Lookup("test.count") + } + count, err := strconv.Atoi(flag.Value.String()) + if err != nil || count != 1 { + errors = append(errors, GinkgoErrors.InvalidGoFlagCount()) + } + } + + if flagSet.WasSet("parallel") || flagSet.WasSet("test.parallel") { + errors = append(errors, GinkgoErrors.InvalidGoFlagParallel()) + } + + if suiteConfig.ParallelTotal < 1 { + errors = append(errors, GinkgoErrors.InvalidParallelTotalConfiguration()) + } + + if suiteConfig.ParallelProcess > suiteConfig.ParallelTotal || suiteConfig.ParallelProcess < 1 { + errors = append(errors, GinkgoErrors.InvalidParallelProcessConfiguration()) + } + + if suiteConfig.ParallelTotal > 1 && suiteConfig.ParallelHost == "" { + errors = append(errors, GinkgoErrors.MissingParallelHostConfiguration()) + } + + if suiteConfig.DryRun && suiteConfig.ParallelTotal > 1 { + errors = append(errors, GinkgoErrors.DryRunInParallelConfiguration()) + } + + if len(suiteConfig.FocusFiles) > 0 { + _, err := ParseFileFilters(suiteConfig.FocusFiles) + if err != nil { + errors = append(errors, err) + } + } + + if len(suiteConfig.SkipFiles) > 0 { + _, err := ParseFileFilters(suiteConfig.SkipFiles) + if err != nil { + errors = append(errors, err) + } + } + + if suiteConfig.LabelFilter != "" { + _, err := ParseLabelFilter(suiteConfig.LabelFilter) + if err != nil { + errors = append(errors, err) + } + } + + switch strings.ToLower(suiteConfig.OutputInterceptorMode) { + case "", "dup", "swap", "none": + default: + errors = append(errors, GinkgoErrors.InvalidOutputInterceptorModeConfiguration(suiteConfig.OutputInterceptorMode)) + } + + numVerbosity := 0 + for _, v := range []bool{reporterConfig.Succinct, reporterConfig.Verbose, reporterConfig.VeryVerbose} { + if v { + numVerbosity++ + } + } + if numVerbosity > 1 { + errors = append(errors, GinkgoErrors.ConflictingVerbosityConfiguration()) + } + + return errors +} + +// GinkgoCLISharedFlags provides flags shared by the Ginkgo CLI's build, watch, and run commands +var GinkgoCLISharedFlags = GinkgoFlags{ + {KeyPath: "C.Recurse", Name: "r", SectionKey: "multiple-suites", + Usage: "If 
set, ginkgo finds and runs test suites under the current directory recursively."}, + {KeyPath: "C.SkipPackage", Name: "skip-package", SectionKey: "multiple-suites", DeprecatedName: "skipPackage", DeprecatedDocLink: "changed-command-line-flags", + UsageArgument: "comma-separated list of packages", + Usage: "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored."}, + {KeyPath: "C.RequireSuite", Name: "require-suite", SectionKey: "failure", DeprecatedName: "requireSuite", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, Ginkgo fails if there are ginkgo tests in a directory but no invocation of RunSpecs."}, + {KeyPath: "C.NumCompilers", Name: "compilers", SectionKey: "multiple-suites", UsageDefaultValue: "0 (will autodetect)", + Usage: "When running multiple packages, the number of concurrent compilations to perform."}, +} + +// GinkgoCLIRunAndWatchFlags provides flags shared by the Ginkgo CLI's build and watch commands (but not run) +var GinkgoCLIRunAndWatchFlags = GinkgoFlags{ + {KeyPath: "C.Procs", Name: "procs", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)", + Usage: "The number of parallel test nodes to run."}, + {KeyPath: "C.Procs", Name: "nodes", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)", + Usage: "--nodes is an alias for --procs"}, + {KeyPath: "C.Parallel", Name: "p", SectionKey: "parallel", + Usage: "If set, ginkgo will run in parallel with an auto-detected number of nodes."}, + {KeyPath: "C.AfterRunHook", Name: "after-run-hook", SectionKey: "misc", DeprecatedName: "afterSuiteHook", DeprecatedDocLink: "changed-command-line-flags", + Usage: "Command to run when a test suite completes."}, + {KeyPath: "C.OutputDir", Name: "output-dir", SectionKey: "output", UsageArgument: "directory", DeprecatedName: "outputdir", DeprecatedDocLink: "improved-profiling-support", + Usage: "A location to place all generated profiles and reports."}, + {KeyPath: "C.KeepSeparateCoverprofiles", Name: "keep-separate-coverprofiles", SectionKey: "code-and-coverage-analysis", + Usage: "If set, Ginkgo does not merge coverprofiles into one monolithic coverprofile. The coverprofiles will remain in their respective package directories or in -output-dir if set."}, + {KeyPath: "C.KeepSeparateReports", Name: "keep-separate-reports", SectionKey: "output", + Usage: "If set, Ginkgo does not merge per-suite reports (e.g. -json-report) into one monolithic report for the entire testrun. 
The reports will remain in their respective package directories or in -output-dir if set."}, + + {KeyPath: "D.Stream", DeprecatedName: "stream", DeprecatedDocLink: "removed--stream", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.Notify", DeprecatedName: "notify", DeprecatedDocLink: "removed--notify", DeprecatedVersion: "2.0.0"}, +} + +// GinkgoCLIRunFlags provides flags for Ginkgo CLI's run command that aren't shared by any other commands +var GinkgoCLIRunFlags = GinkgoFlags{ + {KeyPath: "C.KeepGoing", Name: "keep-going", SectionKey: "multiple-suites", DeprecatedName: "keepGoing", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, failures from earlier test suites do not prevent later test suites from running."}, + {KeyPath: "C.UntilItFails", Name: "until-it-fails", SectionKey: "debug", DeprecatedName: "untilItFails", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will keep rerunning test suites until a failure occurs."}, + {KeyPath: "C.Repeat", Name: "repeat", SectionKey: "debug", UsageArgument: "n", UsageDefaultValue: "0 - i.e. no repetition, run only once", + Usage: "The number of times to re-run a test-suite. Useful for debugging flaky tests. If set to N the suite will be run N+1 times and will be required to pass each time."}, + {KeyPath: "C.RandomizeSuites", Name: "randomize-suites", SectionKey: "order", DeprecatedName: "randomizeSuites", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will randomize the order in which test suites run."}, +} + +// GinkgoCLIRunFlags provides flags for Ginkgo CLI's watch command that aren't shared by any other commands +var GinkgoCLIWatchFlags = GinkgoFlags{ + {KeyPath: "C.Depth", Name: "depth", SectionKey: "watch", + Usage: "Ginkgo will watch dependencies down to this depth in the dependency tree."}, + {KeyPath: "C.WatchRegExp", Name: "watch-regexp", SectionKey: "watch", DeprecatedName: "watchRegExp", DeprecatedDocLink: "changed-command-line-flags", + UsageArgument: "Regular Expression", + UsageDefaultValue: `\.go$`, + Usage: "Only files matching this regular expression will be watched for changes."}, +} + +// GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI +var GoBuildFlags = GinkgoFlags{ + {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", + Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."}, + {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", + Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, + {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", + Usage: "Enable coverage analysis. 
Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."}, + {KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis", + Usage: `Set the mode for coverage analysis for the package[s] being tested. 'set': does this statement run? 'count': how many times does this statement run? 'atomic': like count, but correct in multithreaded tests and more expensive (must use atomic with -race). Sets -cover`}, + {KeyPath: "Go.CoverPkg", Name: "coverpkg", UsageArgument: "pattern1,pattern2,pattern3", SectionKey: "code-and-coverage-analysis", + Usage: "Apply coverage analysis in each test to packages matching the patterns. The default is for each test to analyze only the package being tested. See 'go help packages' for a description of package patterns. Sets -cover."}, + + {KeyPath: "Go.A", Name: "a", SectionKey: "go-build", + Usage: "force rebuilding of packages that are already up-to-date."}, + {KeyPath: "Go.ASMFlags", Name: "asmflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool asm invocation."}, + {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build", + Usage: "build mode to use. See 'go help buildmode' for more."}, + {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build", + Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."}, + {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each gccgo compiler/linker invocation."}, + {KeyPath: "Go.GCFlags", Name: "gcflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool compile invocation."}, + {KeyPath: "Go.InstallSuffix", Name: "installsuffix", SectionKey: "go-build", + Usage: "a suffix to use in the name of the package installation directory, in order to keep output separate from default builds. If using the -race flag, the install suffix is automatically set to raceor, if set explicitly, has _race appended to it. Likewise for the -msan flag. Using a -buildmode option that requires non-default compile flags has a similar effect."}, + {KeyPath: "Go.LDFlags", Name: "ldflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool link invocation."}, + {KeyPath: "Go.LinkShared", Name: "linkshared", SectionKey: "go-build", + Usage: "build code that will be linked against shared libraries previously created with -buildmode=shared."}, + {KeyPath: "Go.Mod", Name: "mod", UsageArgument: "mode (readonly, vendor, or mod)", SectionKey: "go-build", + Usage: "module download mode to use: readonly, vendor, or mod. See 'go help modules' for more."}, + {KeyPath: "Go.ModCacheRW", Name: "modcacherw", SectionKey: "go-build", + Usage: "leave newly-created directories in the module cache read-write instead of making them read-only."}, + {KeyPath: "Go.ModFile", Name: "modfile", UsageArgument: "file", SectionKey: "go-build", + Usage: `in module aware mode, read (and possibly write) an alternate go.mod file instead of the one in the module root directory. A file named go.mod must still be present in order to determine the module root directory, but it is not accessed. 
When -modfile is specified, an alternate go.sum file is also used: its path is derived from the -modfile flag by trimming the ".mod" extension and appending ".sum".`}, + {KeyPath: "Go.MSan", Name: "msan", SectionKey: "go-build", + Usage: "enable interoperation with memory sanitizer. Supported only on linux/amd64, linux/arm64 and only with Clang/LLVM as the host C compiler. On linux/arm64, pie build mode will be used."}, + {KeyPath: "Go.N", Name: "n", SectionKey: "go-build", + Usage: "print the commands but do not run them."}, + {KeyPath: "Go.PkgDir", Name: "pkgdir", UsageArgument: "dir", SectionKey: "go-build", + Usage: "install and load all packages from dir instead of the usual locations. For example, when building with a non-standard configuration, use -pkgdir to keep generated packages in a separate location."}, + {KeyPath: "Go.Tags", Name: "tags", UsageArgument: "tag,list", SectionKey: "go-build", + Usage: "a comma-separated list of build tags to consider satisfied during the build. For more information about build tags, see the description of build constraints in the documentation for the go/build package. (Earlier versions of Go used a space-separated list, and that form is deprecated but still recognized.)"}, + {KeyPath: "Go.TrimPath", Name: "trimpath", SectionKey: "go-build", + Usage: `remove all file system paths from the resulting executable. Instead of absolute file system paths, the recorded file names will begin with either "go" (for the standard library), or a module path@version (when using modules), or a plain import path (when using GOPATH).`}, + {KeyPath: "Go.ToolExec", Name: "toolexec", UsageArgument: "'cmd args'", SectionKey: "go-build", + Usage: "a program to use to invoke toolchain programs like vet and asm. For example, instead of running asm, the go command will run cmd args /path/to/asm '."}, + {KeyPath: "Go.Work", Name: "work", SectionKey: "go-build", + Usage: "print the name of the temporary work directory and do not delete it when exiting."}, + {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", + Usage: "print the commands."}, +} + +// GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI +var GoRunFlags = GinkgoFlags{ + {KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis", + Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`}, + {KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. Preserves test binary.`}, + {KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", + Usage: `Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with rate. See 'go doc runtime.SetBlockProfileRate'. The profiler aims to sample, on average, one blocking event every n nanoseconds the program spends blocked. By default, if -test.blockprofile is set without this flag, all blocking events are recorded, equivalent to -test.blockprofilerate=1.`}, + {KeyPath: "Go.CPUProfile", Name: "cpuprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a CPU profile to the specified file before exiting. 
Preserves test binary.`}, + {KeyPath: "Go.MemProfile", Name: "memprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write an allocation profile to the file after all tests have passed. Preserves test binary.`}, + {KeyPath: "Go.MemProfileRate", Name: "memprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", + Usage: `Enable more precise (and expensive) memory allocation profiles by setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'. To profile all memory allocations, use -test.memprofilerate=1.`}, + {KeyPath: "Go.MutexProfile", Name: "mutexprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a mutex contention profile to the specified file when all tests are complete. Preserves test binary.`}, + {KeyPath: "Go.MutexProfileFraction", Name: "mutexprofilefraction", UsageArgument: "n", SectionKey: "performance-analysis", + Usage: `if >= 0, calls runtime.SetMutexProfileFraction() Sample 1 in n stack traces of goroutines holding a contended mutex.`}, + {KeyPath: "Go.Trace", Name: "execution-trace", UsageArgument: "file", ExportAs: "trace", SectionKey: "performance-analysis", + Usage: `Write an execution trace to the specified file before exiting.`}, +} + +// VetAndInitializeCLIAndGoConfig validates that the Ginkgo CLI's configuration is sound +// It returns a potentially mutated copy of the config that rationalizes the configuration to ensure consistency for downstream consumers +func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsConfig) (CLIConfig, GoFlagsConfig, []error) { + errors := []error{} + + if cliConfig.Repeat > 0 && cliConfig.UntilItFails { + errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails()) + } + + //initialize the output directory + if cliConfig.OutputDir != "" { + err := os.MkdirAll(cliConfig.OutputDir, 0777) + if err != nil { + errors = append(errors, err) + } + } + + //ensure cover mode is configured appropriately + if goFlagsConfig.CoverMode != "" || goFlagsConfig.CoverPkg != "" || goFlagsConfig.CoverProfile != "" { + goFlagsConfig.Cover = true + } + if goFlagsConfig.Cover && goFlagsConfig.CoverProfile == "" { + goFlagsConfig.CoverProfile = "coverprofile.out" + } + + return cliConfig, goFlagsConfig, errors +} + +// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string) ([]string, error) { + // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure + // the built test binary can generate a coverprofile + if goFlagsConfig.CoverProfile != "" { + goFlagsConfig.Cover = true + } + + args := []string{"test", "-c", "-o", destination, packageToBuild} + goArgs, err := GenerateFlagArgs( + GoBuildFlags, + map[string]interface{}{ + "Go": &goFlagsConfig, + }, + ) + + if err != nil { + return []string{}, err + } + args = append(args, goArgs...) + return args, nil +} + +// GenerateGinkgoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled Ginkgo test binary +func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterConfig, goFlagsConfig GoFlagsConfig) ([]string, error) { + var flags GinkgoFlags + flags = SuiteConfigFlags.WithPrefix("ginkgo") + flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...) 
+ flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...) + flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...) + bindings := map[string]interface{}{ + "S": &suiteConfig, + "R": &reporterConfig, + "Go": &goFlagsConfig, + } + + return GenerateFlagArgs(flags, bindings) +} + +// GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary +func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) { + flags := GoRunFlags.WithPrefix("test") + bindings := map[string]interface{}{ + "Go": &goFlagsConfig, + } + + args, err := GenerateFlagArgs(flags, bindings) + if err != nil { + return args, err + } + args = append(args, "--test.v") + return args, nil +} + +// BuildRunCommandFlagSet builds the FlagSet for the `ginkgo run` command +func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags + flags = flags.CopyAppend(ReporterConfigFlags...) + flags = flags.CopyAppend(GinkgoCLISharedFlags...) + flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...) + flags = flags.CopyAppend(GinkgoCLIRunFlags...) + flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoRunFlags...) + + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + return NewGinkgoFlagSet(flags, bindings, FlagSections) +} + +// BuildWatchCommandFlagSet builds the FlagSet for the `ginkgo watch` command +func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags + flags = flags.CopyAppend(ReporterConfigFlags...) + flags = flags.CopyAppend(GinkgoCLISharedFlags...) + flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...) + flags = flags.CopyAppend(GinkgoCLIWatchFlags...) + flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoRunFlags...) + + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + return NewGinkgoFlagSet(flags, bindings, FlagSections) +} + +// BuildBuildCommandFlagSet builds the FlagSet for the `ginkgo build` command +func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := GinkgoCLISharedFlags + flags = flags.CopyAppend(GoBuildFlags...) 
+ + bindings := map[string]interface{}{ + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + flagSections := make(GinkgoFlagSections, len(FlagSections)) + copy(flagSections, FlagSections) + for i := range flagSections { + if flagSections[i].Key == "multiple-suites" { + flagSections[i].Heading = "Building Multiple Suites" + } + if flagSections[i].Key == "go-build" { + flagSections[i] = GinkgoFlagSection{Key: "go-build", Style: "{{/}}", Heading: "Go Build Flags", + Description: "These flags are inherited from go build."} + } + } + + return NewGinkgoFlagSet(flags, bindings, flagSections) +} + +func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) { + flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package") + + bindings := map[string]interface{}{ + "C": cliConfig, + } + + flagSections := make(GinkgoFlagSections, len(FlagSections)) + copy(flagSections, FlagSections) + for i := range flagSections { + if flagSections[i].Key == "multiple-suites" { + flagSections[i].Heading = "Fetching Labels from Multiple Suites" + } + } + + return NewGinkgoFlagSet(flags, bindings, flagSections) +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go b/src/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go new file mode 100644 index 00000000..17922304 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go @@ -0,0 +1,141 @@ +package types + +import ( + "strconv" + "time" +) + +/* + A set of deprecations to make the transition from v1 to v2 easier for users who have written custom reporters. +*/ + +type SuiteSummary = DeprecatedSuiteSummary +type SetupSummary = DeprecatedSetupSummary +type SpecSummary = DeprecatedSpecSummary +type SpecMeasurement = DeprecatedSpecMeasurement +type SpecComponentType = NodeType +type SpecFailure = DeprecatedSpecFailure + +var ( + SpecComponentTypeInvalid = NodeTypeInvalid + SpecComponentTypeContainer = NodeTypeContainer + SpecComponentTypeIt = NodeTypeIt + SpecComponentTypeBeforeEach = NodeTypeBeforeEach + SpecComponentTypeJustBeforeEach = NodeTypeJustBeforeEach + SpecComponentTypeAfterEach = NodeTypeAfterEach + SpecComponentTypeJustAfterEach = NodeTypeJustAfterEach + SpecComponentTypeBeforeSuite = NodeTypeBeforeSuite + SpecComponentTypeSynchronizedBeforeSuite = NodeTypeSynchronizedBeforeSuite + SpecComponentTypeAfterSuite = NodeTypeAfterSuite + SpecComponentTypeSynchronizedAfterSuite = NodeTypeSynchronizedAfterSuite +) + +type DeprecatedSuiteSummary struct { + SuiteDescription string + SuiteSucceeded bool + SuiteID string + + NumberOfSpecsBeforeParallelization int + NumberOfTotalSpecs int + NumberOfSpecsThatWillBeRun int + NumberOfPendingSpecs int + NumberOfSkippedSpecs int + NumberOfPassedSpecs int + NumberOfFailedSpecs int + NumberOfFlakedSpecs int + RunTime time.Duration +} + +type DeprecatedSetupSummary struct { + ComponentType SpecComponentType + CodeLocation CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + + CapturedOutput string + SuiteID string +} + +type DeprecatedSpecSummary struct { + ComponentTexts []string + ComponentCodeLocations []CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + IsMeasurement bool + NumberOfSamples int + Measurements map[string]*DeprecatedSpecMeasurement + + CapturedOutput string + SuiteID string +} + +func (s DeprecatedSpecSummary) HasFailureState() bool { + return s.State.Is(SpecStateFailureStates) +} + +func (s DeprecatedSpecSummary) TimedOut() bool { + return false +} + 
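+// The deprecated V1 summary no longer tracks a timed-out state, so TimedOut above
+// always reports false; the predicates below simply map V2 SpecState values onto
+// the old V1 helper names.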
+func (s DeprecatedSpecSummary) Panicked() bool { + return s.State == SpecStatePanicked +} + +func (s DeprecatedSpecSummary) Failed() bool { + return s.State == SpecStateFailed +} + +func (s DeprecatedSpecSummary) Passed() bool { + return s.State == SpecStatePassed +} + +func (s DeprecatedSpecSummary) Skipped() bool { + return s.State == SpecStateSkipped +} + +func (s DeprecatedSpecSummary) Pending() bool { + return s.State == SpecStatePending +} + +type DeprecatedSpecFailure struct { + Message string + Location CodeLocation + ForwardedPanic string + + ComponentIndex int + ComponentType SpecComponentType + ComponentCodeLocation CodeLocation +} + +type DeprecatedSpecMeasurement struct { + Name string + Info interface{} + Order int + + Results []float64 + + Smallest float64 + Largest float64 + Average float64 + StdDeviation float64 + + SmallestLabel string + LargestLabel string + AverageLabel string + Units string + Precision int +} + +func (s DeprecatedSpecMeasurement) PrecisionFmt() string { + if s.Precision == 0 { + return "%f" + } + + str := strconv.Itoa(s.Precision) + + return "%." + str + "f" +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go b/src/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go new file mode 100644 index 00000000..2948dfa0 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go @@ -0,0 +1,170 @@ +package types + +import ( + "os" + "strconv" + "strings" + "sync" + "unicode" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type Deprecation struct { + Message string + DocLink string + Version string +} + +type deprecations struct{} + +var Deprecations = deprecations{} + +func (d deprecations) CustomReporter() Deprecation { + return Deprecation{ + Message: "Support for custom reporters has been removed in V2. Please read the documentation linked to below for Ginkgo's new behavior and for a migration path:", + DocLink: "removed-custom-reporters", + Version: "1.16.0", + } +} + +func (d deprecations) Async() Deprecation { + return Deprecation{ + Message: "You are passing a Done channel to a test node to test asynchronous behavior. This is deprecated in Ginkgo V2. Your test will run synchronously and the timeout will be ignored.", + DocLink: "removed-async-testing", + Version: "1.16.0", + } +} + +func (d deprecations) Measure() Deprecation { + return Deprecation{ + Message: "Measure is deprecated and will be removed in Ginkgo V2. Please migrate to gomega/gmeasure.", + DocLink: "removed-measure", + Version: "1.16.3", + } +} + +func (d deprecations) ParallelNode() Deprecation { + return Deprecation{ + Message: "GinkgoParallelNode is deprecated and will be removed in Ginkgo V2. Please use GinkgoParallelProcess instead.", + DocLink: "renamed-ginkgoparallelnode", + Version: "1.16.4", + } +} + +func (d deprecations) CurrentGinkgoTestDescription() Deprecation { + return Deprecation{ + Message: "CurrentGinkgoTestDescription() is deprecated in Ginkgo V2. Use CurrentSpecReport() instead.", + DocLink: "changed-currentginkgotestdescription", + Version: "1.16.0", + } +} + +func (d deprecations) Convert() Deprecation { + return Deprecation{ + Message: "The convert command is deprecated in Ginkgo V2", + DocLink: "removed-ginkgo-convert", + Version: "1.16.0", + } +} + +func (d deprecations) Blur() Deprecation { + return Deprecation{ + Message: "The blur command is deprecated in Ginkgo V2. 
Use 'ginkgo unfocus' instead.", + Version: "1.16.0", + } +} + +func (d deprecations) Nodot() Deprecation { + return Deprecation{ + Message: "The nodot command is deprecated in Ginkgo V2. Please either dot-import Ginkgo or use the package identifier in your code to references objects and types provided by Ginkgo and Gomega.", + DocLink: "removed-ginkgo-nodot", + Version: "1.16.0", + } +} + +type DeprecationTracker struct { + deprecations map[Deprecation][]CodeLocation + lock *sync.Mutex +} + +func NewDeprecationTracker() *DeprecationTracker { + return &DeprecationTracker{ + deprecations: map[Deprecation][]CodeLocation{}, + lock: &sync.Mutex{}, + } +} + +func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...CodeLocation) { + ackVersion := os.Getenv("ACK_GINKGO_DEPRECATIONS") + if deprecation.Version != "" && ackVersion != "" { + ack := ParseSemVer(ackVersion) + version := ParseSemVer(deprecation.Version) + if ack.GreaterThanOrEqualTo(version) { + return + } + } + + d.lock.Lock() + defer d.lock.Unlock() + if len(cl) == 1 { + d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0]) + } else { + d.deprecations[deprecation] = []CodeLocation{} + } +} + +func (d *DeprecationTracker) DidTrackDeprecations() bool { + d.lock.Lock() + defer d.lock.Unlock() + return len(d.deprecations) > 0 +} + +func (d *DeprecationTracker) DeprecationsReport() string { + d.lock.Lock() + defer d.lock.Unlock() + out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n") + out += formatter.F("{{light-yellow}}============================================={{/}}\n") + for deprecation, locations := range d.deprecations { + out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n") + if deprecation.DocLink != "" { + out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://onsi.github.io/ginkgo/MIGRATING_TO_V2#%s{{/}}\n", deprecation.DocLink) + } + for _, location := range locations { + out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location) + } + } + out += formatter.F("\n{{gray}}To silence deprecations that can be silenced set the following environment variable:{{/}}\n") + out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", VERSION) + return out +} + +type SemVer struct { + Major int + Minor int + Patch int +} + +func (s SemVer) GreaterThanOrEqualTo(o SemVer) bool { + return (s.Major > o.Major) || + (s.Major == o.Major && s.Minor > o.Minor) || + (s.Major == o.Major && s.Minor == o.Minor && s.Patch >= o.Patch) +} + +func ParseSemVer(semver string) SemVer { + out := SemVer{} + semver = strings.TrimFunc(semver, func(r rune) bool { + return !(unicode.IsNumber(r) || r == '.') + }) + components := strings.Split(semver, ".") + if len(components) > 0 { + out.Major, _ = strconv.Atoi(components[0]) + } + if len(components) > 1 { + out.Minor, _ = strconv.Atoi(components[1]) + } + if len(components) > 2 { + out.Patch, _ = strconv.Atoi(components[2]) + } + return out +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go b/src/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go new file mode 100644 index 00000000..1d96ae02 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go @@ -0,0 +1,43 @@ +package types + +import "encoding/json" + +type EnumSupport struct { + toString map[uint]string + toEnum map[string]uint + maxEnum uint +} + +func NewEnumSupport(toString map[uint]string) EnumSupport { + toEnum, maxEnum := map[string]uint{}, uint(0) + for k, v := range toString { + 
toEnum[v] = k + if maxEnum < k { + maxEnum = k + } + } + return EnumSupport{toString: toString, toEnum: toEnum, maxEnum: maxEnum} +} + +func (es EnumSupport) String(e uint) string { + if e > es.maxEnum { + return es.toString[0] + } + return es.toString[e] +} + +func (es EnumSupport) UnmarshJSON(b []byte) (uint, error) { + var dec string + if err := json.Unmarshal(b, &dec); err != nil { + return 0, err + } + out := es.toEnum[dec] // if we miss we get 0 which is what we want anyway + return out, nil +} + +func (es EnumSupport) MarshJSON(e uint) ([]byte, error) { + if e == 0 || e > es.maxEnum { + return json.Marshal(nil) + } + return json.Marshal(es.toString[e]) +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/src/vendor/github.com/onsi/ginkgo/v2/types/errors.go new file mode 100644 index 00000000..40331d29 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -0,0 +1,534 @@ +package types + +import ( + "fmt" + "reflect" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type GinkgoError struct { + Heading string + Message string + DocLink string + CodeLocation CodeLocation +} + +func (g GinkgoError) Error() string { + out := formatter.F("{{bold}}{{red}}%s{{/}}\n", g.Heading) + if (g.CodeLocation != CodeLocation{}) { + contentsOfLine := strings.TrimLeft(g.CodeLocation.ContentsOfLine(), "\t ") + if contentsOfLine != "" { + out += formatter.F("{{light-gray}}%s{{/}}\n", contentsOfLine) + } + out += formatter.F("{{gray}}%s{{/}}\n", g.CodeLocation) + } + if g.Message != "" { + out += formatter.Fiw(1, formatter.COLS, g.Message) + out += "\n\n" + } + if g.DocLink != "" { + out += formatter.Fiw(1, formatter.COLS, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}\n", g.DocLink) + } + + return out +} + +type ginkgoErrors struct{} + +var GinkgoErrors = ginkgoErrors{} + +func (g ginkgoErrors) UncaughtGinkgoPanic(cl CodeLocation) error { + return GinkgoError{ + Heading: "Your Test Panicked", + Message: `When you, or your assertion library, calls Ginkgo's Fail(), +Ginkgo panics to prevent subsequent assertions from running. + +Normally Ginkgo rescues this panic so you shouldn't see it. + +However, if you make an assertion in a goroutine, Ginkgo can't capture the panic. +To circumvent this, you should call + + defer GinkgoRecover() + +at the top of the goroutine that caused this panic. + +Alternatively, you may have made an assertion outside of a Ginkgo +leaf node (e.g. in a container node or some out-of-band function) - please move your assertion to +an appropriate Ginkgo node (e.g. a BeforeSuite, BeforeEach, It, etc...).`, + DocLink: "mental-model-how-ginkgo-handles-failure", + CodeLocation: cl, + } +} + +func (g ginkgoErrors) RerunningSuite() error { + return GinkgoError{ + Heading: "Rerunning Suite", + Message: formatter.F(`It looks like you are calling RunSpecs more than once. Ginkgo does not support rerunning suites. If you want to rerun a suite try {{bold}}ginkgo --repeat=N{{/}} or {{bold}}ginkgo --until-it-fails{{/}}`), + DocLink: "repeating-spec-runs-and-managing-flaky-specs", + } +} + +/* Tree construction errors */ + +func (g ginkgoErrors) PushingNodeInRunPhase(nodeType NodeType, cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node +to the Ginkgo spec tree in a leaf node {{bold}}after{{/}} the specs started running. 
+ +To enable randomization and parallelization Ginkgo requires the spec tree +to be fully constructed up front. In practice, this means that you can +only create nodes like {{bold}}[%s]{{/}} at the top-level or within the +body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: "mental-model-how-ginkgo-traverses-the-spec-hierarchy", + } +} + +func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error { + return GinkgoError{ + Heading: "Assertion or Panic detected during tree construction", + Message: formatter.F( + `Ginkgo detected a panic while constructing the spec tree. +You may be trying to make an assertion in the body of a container node +(i.e. {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}). + +Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/}}, +{{bold}}It{{/}}, etc. + +{{bold}}Here's the content of the panic that was caught:{{/}} +%v`, caughtPanic), + CodeLocation: cl, + DocLink: "no-assertions-in-container-nodes", + } +} + +func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error { + docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" + if nodeType.Is(NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportaftersuite" + } + + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a container node. + +{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: docLink, + } +} + +func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error { + docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" + if nodeType.Is(NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportaftersuite" + } + + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a leaf node after the spec started running. + +{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: docLink, + } +} + +func (g ginkgoErrors) MultipleBeforeSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return ginkgoErrorMultipleSuiteNodes("setup", nodeType, cl, earlierNodeType, earlierCodeLocation) +} + +func (g ginkgoErrors) MultipleAfterSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return ginkgoErrorMultipleSuiteNodes("teardown", nodeType, cl, earlierNodeType, earlierCodeLocation) +} + +func ginkgoErrorMultipleSuiteNodes(setupOrTeardown string, nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node but +you already have a {{bold}}[%s]{{/}} node defined at: {{gray}}%s{{/}}. 
+ +Ginkgo only allows you to define one suite %s node.`, nodeType, earlierNodeType, earlierCodeLocation, setupOrTeardown), + CodeLocation: cl, + DocLink: "suite-setup-and-cleanup-beforesuite-and-aftersuite", + } +} + +/* Decorator errors */ +func (g ginkgoErrors) InvalidDecoratorForNodeType(cl CodeLocation, nodeType NodeType, decorator string) error { + return GinkgoError{ + Heading: "Invalid Decorator", + Message: formatter.F(`[%s] node cannot be passed a(n) '%s' decorator`, nodeType, decorator), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidDeclarationOfFocusedAndPending(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Combination of Decorators: Focused and Pending", + Message: formatter.F(`[%s] node was decorated with both Focus and Pending. At most one is allowed.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error { + return GinkgoError{ + Heading: "Unknown Decorator", + Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyType(t reflect.Type, cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. +You passed {{bold}}%s{{/}} instead.`, nodeType, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Multiple Functions", + Message: formatter.F(`[%s] node must be passed a single {{bold}}func(){{/}} - but more than one was passed in.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) MissingBodyFunction(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Missing Functions", + Message: formatter.F(`[%s] node must be passed a single {{bold}}func(){{/}} - but none was passed in.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +/* Ordered Container errors */ +func (g ginkgoErrors) InvalidSerialNodeInNonSerialOrderedContainer(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Serial Node in Non-Serial Ordered Container", + Message: formatter.F(`[%s] node was decorated with Serial but occurs in an Ordered container that is not marked Serial. Move the Serial decorator to the outer-most Ordered container to mark all ordered specs within the container as serial.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Setup Node not in Ordered Container", + Message: fmt.Sprintf("[%s] setup nodes must appear inside an Ordered container. They cannot be nested within other containers, even containers in an ordered container.", nodeType), + CodeLocation: cl, + DocLink: "ordered-containers", + } +} + +/* DeferCleanup errors */ +func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup requires a valid function", + Message: "You must pass DeferCleanup a function to invoke. 
This function must return zero or one values - if it does return, it must return an error. The function can take arbitrarily many arguments and you should provide these to DeferCleanup to pass along to the function.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup must be called inside a setup or subject node", + Message: "You must call DeferCleanup inside a setup node (e.g. BeforeEach, BeforeSuite, AfterAll...) or a subject node (i.e. It). You can't call DeferCleanup at the top-level or in a container node - use the After* family of setup nodes instead.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType), + Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a ReportAfterEach or ReportAfterSuite.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup cannot be called in a DeferCleanup callback", + Message: "Please inline your cleanup code - Ginkgo doesn't let you call DeferCleanup from within DeferCleanup", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +/* ReportEntry errors */ +func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg interface{}) error { + return GinkgoError{ + Heading: "Too Many ReportEntry Values", + Message: formatter.F(`{{bold}}AddGinkgoReport{{/}} can only be given one value. Got unexpected value: %#v`, arg), + CodeLocation: cl, + DocLink: "attaching-data-to-reports", + } +} + +func (g ginkgoErrors) AddReportEntryNotDuringRunPhase(cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F(`It looks like you are calling {{bold}}AddGinkgoReport{{/}} outside of a running spec. Make sure you call {{bold}}AddGinkgoReport{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`), + CodeLocation: cl, + DocLink: "attaching-data-to-reports", + } +} + +/* By errors */ +func (g ginkgoErrors) ByNotDuringRunPhase(cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F(`It looks like you are calling {{bold}}By{{/}} outside of a running spec. Make sure you call {{bold}}By{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`), + CodeLocation: cl, + DocLink: "documenting-complex-specs-by", + } +} + +/* FileFilter and SkipFilter errors */ +func (g ginkgoErrors) InvalidFileFilter(filter string) error { + return GinkgoError{ + Heading: "Invalid File Filter", + Message: fmt.Sprintf(`The provided file filter: "%s" is invalid. File filters must have the format "file", "file:lines" where "file" is a regular expression that will match against the file path and lines is a comma-separated list of integers (e.g. file:1,5,7) or line-ranges (e.g. file:1-3,5-9) or both (e.g. 
file:1,5-9)`, filter), + DocLink: "filtering-specs", + } +} + +func (g ginkgoErrors) InvalidFileFilterRegularExpression(filter string, err error) error { + return GinkgoError{ + Heading: "Invalid File Filter Regular Expression", + Message: fmt.Sprintf(`The provided file filter: "%s" included an invalid regular expression. regexp.Compile error: %s`, filter, err), + DocLink: "filtering-specs", + } +} + +/* Label Errors */ +func (g ginkgoErrors) SyntaxErrorParsingLabelFilter(input string, location int, error string) error { + var message string + if location >= 0 { + for i, r := range input { + if i == location { + message += "{{red}}{{bold}}{{underline}}" + } + message += string(r) + if i == location { + message += "{{/}}" + } + } + } else { + message = input + } + message += "\n" + error + return GinkgoError{ + Heading: "Syntax Error Parsing Label Filter", + Message: message, + DocLink: "spec-labels", + } +} + +func (g ginkgoErrors) InvalidLabel(label string, cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Label", + Message: fmt.Sprintf("'%s' is an invalid label. Labels cannot contain of the following characters: '&|!,()/'", label), + CodeLocation: cl, + DocLink: "spec-labels", + } +} + +func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Empty Label", + Message: "Labels cannot be empty", + CodeLocation: cl, + DocLink: "spec-labels", + } +} + +/* Table errors */ +func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error { + return GinkgoError{ + Heading: "DescribeTable passed multiple functions", + Message: "It looks like you are passing multiple functions into DescribeTable. Only one function can be passed in. This function will be called for each Entry in the table.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Entry description", + Message: "Entry description functions must be a string, a function that accepts the entry parameters and returns a string, or nil.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectParameterTypeForTable(i int, name string, cl CodeLocation) error { + return GinkgoError{ + Heading: "DescribeTable passed incorrect parameter type", + Message: fmt.Sprintf("Parameter #%d passed to DescribeTable is of incorrect type <%s>", i, name), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) TooFewParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Too few parameters passed in to %s", kind), + Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) TooManyParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Too many parameters passed in to %s", kind), + Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectParameterTypeToTableFunction(i int, expected, actual reflect.Type, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind), + Message: fmt.Sprintf("The %s expected parameter #%d to be of type <%s> but you passed 
in <%s>", kind, i, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, actual reflect.Type, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind), + Message: fmt.Sprintf("The %s expected its variadic parameters to be of type <%s> but you passed in <%s>", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +/* Parallel Synchronization errors */ + +func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error { + return GinkgoError{ + Heading: "Test Report unavailable because a Ginkgo parallel process disappeared", + Message: "The aggregated report could not be fetched for a ReportAfterSuite node. A Ginkgo parallel process disappeared before it could finish reporting.", + } +} + +func (g ginkgoErrors) SynchronizedBeforeSuiteFailedOnProc1() error { + return GinkgoError{ + Heading: "SynchronizedBeforeSuite failed on Ginkgo parallel process #1", + Message: "The first SynchronizedBeforeSuite function running on Ginkgo parallel process #1 failed. This suite will now abort.", + } +} + +func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error { + return GinkgoError{ + Heading: "Process #1 disappeared before SynchronizedBeforeSuite could report back", + Message: "Ginkgo parallel process #1 disappeared before the first SynchronizedBeforeSuite function completed. This suite will now abort.", + } +} + +/* Configuration errors */ + +func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error { + return GinkgoError{ + Heading: "Unknown Type passed to RunSpecs", + Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value), + } +} + +var sharedParallelErrorMessage = "It looks like you are trying to run specs in parallel with go test.\nThis is unsupported and you should use the ginkgo CLI instead." + +func (g ginkgoErrors) InvalidParallelTotalConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.total must be >= 1", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) InvalidParallelProcessConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.process is one-indexed and must be <= ginkgo.parallel.total", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) MissingParallelHostConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.host is missing", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) UnreachableParallelHost(host string) error { + return GinkgoError{ + Heading: "Could not reach ginkgo.parallel.host:" + host, + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) DryRunInParallelConfiguration() error { + return GinkgoError{ + Heading: "Ginkgo only performs -dryRun in serial mode.", + Message: "Please try running ginkgo -dryRun again, but without -p or -procs to ensure the suite is running in series.", + } +} + +func (g ginkgoErrors) ConflictingVerbosityConfiguration() error { + return GinkgoError{ + Heading: "Conflicting reporter verbosity settings.", + Message: "You can't set more than one of -v, -vv and --succinct. 
Please pick one!", + } +} + +func (g ginkgoErrors) InvalidOutputInterceptorModeConfiguration(value string) error { + return GinkgoError{ + Heading: fmt.Sprintf("Invalid value '%s' for --output-interceptor-mode.", value), + Message: "You must choose one of 'dup', 'swap', or 'none'.", + } +} + +func (g ginkgoErrors) InvalidGoFlagCount() error { + return GinkgoError{ + Heading: "Use of go test -count", + Message: "Ginkgo does not support using go test -count to rerun suites. Only -count=1 is allowed. To repeat suite runs, please use the ginkgo cli and `ginkgo -until-it-fails` or `ginkgo -repeat=N`.", + } +} + +func (g ginkgoErrors) InvalidGoFlagParallel() error { + return GinkgoError{ + Heading: "Use of go test -parallel", + Message: "Go test's implementation of parallelization does not actually parallelize Ginkgo specs. Please use the ginkgo cli and `ginkgo -p` or `ginkgo -procs=N` instead.", + } +} + +func (g ginkgoErrors) BothRepeatAndUntilItFails() error { + return GinkgoError{ + Heading: "--repeat and --until-it-fails are both set", + Message: "--until-it-fails directs Ginkgo to rerun specs indefinitely until they fail. --repeat directs Ginkgo to rerun specs a set number of times. You can't set both... which would you like?", + } +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go b/src/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go new file mode 100644 index 00000000..cc21df71 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go @@ -0,0 +1,106 @@ +package types + +import ( + "regexp" + "strconv" + "strings" +) + +func ParseFileFilters(filters []string) (FileFilters, error) { + ffs := FileFilters{} + for _, filter := range filters { + ff := FileFilter{} + if filter == "" { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + components := strings.Split(filter, ":") + if !(len(components) == 1 || len(components) == 2) { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + + var err error + ff.Filename, err = regexp.Compile(components[0]) + if err != nil { + return nil, err + } + if len(components) == 2 { + lineFilters := strings.Split(components[1], ",") + for _, lineFilter := range lineFilters { + components := strings.Split(lineFilter, "-") + if len(components) == 1 { + line, err := strconv.Atoi(strings.TrimSpace(components[0])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + ff.LineFilters = append(ff.LineFilters, LineFilter{line, line + 1}) + } else if len(components) == 2 { + line1, err := strconv.Atoi(strings.TrimSpace(components[0])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + line2, err := strconv.Atoi(strings.TrimSpace(components[1])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + ff.LineFilters = append(ff.LineFilters, LineFilter{line1, line2}) + } else { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + } + } + ffs = append(ffs, ff) + } + return ffs, nil +} + +type FileFilter struct { + Filename *regexp.Regexp + LineFilters LineFilters +} + +func (f FileFilter) Matches(locations []CodeLocation) bool { + for _, location := range locations { + if f.Filename.MatchString(location.FileName) && + f.LineFilters.Matches(location.LineNumber) { + return true + } + + } + return false +} + +type FileFilters []FileFilter + +func (ffs FileFilters) Matches(locations []CodeLocation) bool { + for _, ff := range ffs { + if ff.Matches(locations) { + return true + } + } + + return false +} + +type LineFilter struct { + Min int + Max 
int +} + +func (lf LineFilter) Matches(line int) bool { + return lf.Min <= line && line < lf.Max +} + +type LineFilters []LineFilter + +func (lfs LineFilters) Matches(line int) bool { + if len(lfs) == 0 { + return true + } + + for _, lf := range lfs { + if lf.Matches(line) { + return true + } + } + return false +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/src/vendor/github.com/onsi/ginkgo/v2/types/flags.go new file mode 100644 index 00000000..9186ae87 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -0,0 +1,489 @@ +package types + +import ( + "flag" + "fmt" + "io" + "reflect" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type GinkgoFlag struct { + Name string + KeyPath string + SectionKey string + + Usage string + UsageArgument string + UsageDefaultValue string + + DeprecatedName string + DeprecatedDocLink string + DeprecatedVersion string + + ExportAs string +} + +type GinkgoFlags []GinkgoFlag + +func (f GinkgoFlags) CopyAppend(flags ...GinkgoFlag) GinkgoFlags { + out := GinkgoFlags{} + out = append(out, f...) + out = append(out, flags...) + return out +} + +func (f GinkgoFlags) WithPrefix(prefix string) GinkgoFlags { + if prefix == "" { + return f + } + out := GinkgoFlags{} + for _, flag := range f { + if flag.Name != "" { + flag.Name = prefix + "." + flag.Name + } + if flag.DeprecatedName != "" { + flag.DeprecatedName = prefix + "." + flag.DeprecatedName + } + if flag.ExportAs != "" { + flag.ExportAs = prefix + "." + flag.ExportAs + } + out = append(out, flag) + } + return out +} + +func (f GinkgoFlags) SubsetWithNames(names ...string) GinkgoFlags { + out := GinkgoFlags{} + for _, flag := range f { + for _, name := range names { + if flag.Name == name { + out = append(out, flag) + break + } + } + } + return out +} + +type GinkgoFlagSection struct { + Key string + Style string + Succinct bool + Heading string + Description string +} + +type GinkgoFlagSections []GinkgoFlagSection + +func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) { + for _, section := range gfs { + if section.Key == key { + return section, true + } + } + + return GinkgoFlagSection{}, false +} + +type GinkgoFlagSet struct { + flags GinkgoFlags + bindings interface{} + + sections GinkgoFlagSections + extraGoFlagsSection GinkgoFlagSection + + flagSet *flag.FlagSet +} + +// Call NewGinkgoFlagSet to create GinkgoFlagSet that creates and binds to it's own *flag.FlagSet +func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections) (GinkgoFlagSet, error) { + return bindFlagSet(GinkgoFlagSet{ + flags: flags, + bindings: bindings, + sections: sections, + }, nil) +} + +// Call NewGinkgoFlagSet to create GinkgoFlagSet that extends an existing *flag.FlagSet +func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) { + return bindFlagSet(GinkgoFlagSet{ + flags: flags, + bindings: bindings, + sections: sections, + extraGoFlagsSection: extraGoFlagsSection, + }, flagSet) +} + +func bindFlagSet(f GinkgoFlagSet, flagSet *flag.FlagSet) (GinkgoFlagSet, error) { + if flagSet == nil { + f.flagSet = flag.NewFlagSet("", flag.ContinueOnError) + //suppress all output as Ginkgo is responsible for formatting usage + f.flagSet.SetOutput(io.Discard) + } else { + f.flagSet = flagSet + //we're piggybacking on an existing flagset (typically go test) so we have limited control + //on user feedback + 
f.flagSet.Usage = f.substituteUsage + } + + for _, flag := range f.flags { + name := flag.Name + + deprecatedUsage := "[DEPRECATED]" + deprecatedName := flag.DeprecatedName + if name != "" { + deprecatedUsage = fmt.Sprintf("[DEPRECATED] use --%s instead", name) + } else if flag.Usage != "" { + deprecatedUsage += " " + flag.Usage + } + + value, ok := valueAtKeyPath(f.bindings, flag.KeyPath) + if !ok { + return GinkgoFlagSet{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath) + } + + iface, addr := value.Interface(), value.Addr().Interface() + + switch value.Type() { + case reflect.TypeOf(string("")): + if name != "" { + f.flagSet.StringVar(addr.(*string), name, iface.(string), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.StringVar(addr.(*string), deprecatedName, iface.(string), deprecatedUsage) + } + case reflect.TypeOf(int64(0)): + if name != "" { + f.flagSet.Int64Var(addr.(*int64), name, iface.(int64), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Int64Var(addr.(*int64), deprecatedName, iface.(int64), deprecatedUsage) + } + case reflect.TypeOf(float64(0)): + if name != "" { + f.flagSet.Float64Var(addr.(*float64), name, iface.(float64), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Float64Var(addr.(*float64), deprecatedName, iface.(float64), deprecatedUsage) + } + case reflect.TypeOf(int(0)): + if name != "" { + f.flagSet.IntVar(addr.(*int), name, iface.(int), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.IntVar(addr.(*int), deprecatedName, iface.(int), deprecatedUsage) + } + case reflect.TypeOf(bool(true)): + if name != "" { + f.flagSet.BoolVar(addr.(*bool), name, iface.(bool), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.BoolVar(addr.(*bool), deprecatedName, iface.(bool), deprecatedUsage) + } + case reflect.TypeOf(time.Duration(0)): + if name != "" { + f.flagSet.DurationVar(addr.(*time.Duration), name, iface.(time.Duration), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.DurationVar(addr.(*time.Duration), deprecatedName, iface.(time.Duration), deprecatedUsage) + } + + case reflect.TypeOf([]string{}): + if name != "" { + f.flagSet.Var(stringSliceVar{value}, name, flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Var(stringSliceVar{value}, deprecatedName, deprecatedUsage) + } + default: + return GinkgoFlagSet{}, fmt.Errorf("unsupported type %T", iface) + } + } + + return f, nil +} + +func (f GinkgoFlagSet) IsZero() bool { + return f.flagSet == nil +} + +func (f GinkgoFlagSet) WasSet(name string) bool { + found := false + f.flagSet.Visit(func(f *flag.Flag) { + if f.Name == name { + found = true + } + }) + + return found +} + +func (f GinkgoFlagSet) Lookup(name string) *flag.Flag { + return f.flagSet.Lookup(name) +} + +func (f GinkgoFlagSet) Parse(args []string) ([]string, error) { + if f.IsZero() { + return args, nil + } + err := f.flagSet.Parse(args) + if err != nil { + return []string{}, err + } + return f.flagSet.Args(), nil +} + +func (f GinkgoFlagSet) ValidateDeprecations(deprecationTracker *DeprecationTracker) { + if f.IsZero() { + return + } + f.flagSet.Visit(func(flag *flag.Flag) { + for _, ginkgoFlag := range f.flags { + if ginkgoFlag.DeprecatedName != "" && strings.HasSuffix(flag.Name, ginkgoFlag.DeprecatedName) { + message := fmt.Sprintf("--%s is deprecated", ginkgoFlag.DeprecatedName) + if ginkgoFlag.Name != "" { + message = fmt.Sprintf("--%s is deprecated, use --%s instead", ginkgoFlag.DeprecatedName, ginkgoFlag.Name) + } else if ginkgoFlag.Usage != "" { + message += " " + ginkgoFlag.Usage + } + + 
deprecationTracker.TrackDeprecation(Deprecation{ + Message: message, + DocLink: ginkgoFlag.DeprecatedDocLink, + Version: ginkgoFlag.DeprecatedVersion, + }) + } + } + }) +} + +func (f GinkgoFlagSet) Usage() string { + if f.IsZero() { + return "" + } + groupedFlags := map[GinkgoFlagSection]GinkgoFlags{} + ungroupedFlags := GinkgoFlags{} + managedFlags := map[string]bool{} + extraGoFlags := []*flag.Flag{} + + for _, flag := range f.flags { + managedFlags[flag.Name] = true + managedFlags[flag.DeprecatedName] = true + + if flag.Name == "" { + continue + } + + section, ok := f.sections.Lookup(flag.SectionKey) + if ok { + groupedFlags[section] = append(groupedFlags[section], flag) + } else { + ungroupedFlags = append(ungroupedFlags, flag) + } + } + + f.flagSet.VisitAll(func(flag *flag.Flag) { + if !managedFlags[flag.Name] { + extraGoFlags = append(extraGoFlags, flag) + } + }) + + out := "" + for _, section := range f.sections { + flags := groupedFlags[section] + if len(flags) == 0 { + continue + } + out += f.usageForSection(section) + if section.Succinct { + succinctFlags := []string{} + for _, flag := range flags { + if flag.Name != "" { + succinctFlags = append(succinctFlags, fmt.Sprintf("--%s", flag.Name)) + } + } + out += formatter.Fiw(1, formatter.COLS, section.Style+strings.Join(succinctFlags, ", ")+"{{/}}\n") + } else { + for _, flag := range flags { + out += f.usageForFlag(flag, section.Style) + } + } + out += "\n" + } + if len(ungroupedFlags) > 0 { + for _, flag := range ungroupedFlags { + out += f.usageForFlag(flag, "") + } + out += "\n" + } + if len(extraGoFlags) > 0 { + out += f.usageForSection(f.extraGoFlagsSection) + for _, goFlag := range extraGoFlags { + out += f.usageForGoFlag(goFlag) + } + } + + return out +} + +func (f GinkgoFlagSet) substituteUsage() { + fmt.Fprintln(f.flagSet.Output(), f.Usage()) +} + +func valueAtKeyPath(root interface{}, keyPath string) (reflect.Value, bool) { + if len(keyPath) == 0 { + return reflect.Value{}, false + } + + val := reflect.ValueOf(root) + components := strings.Split(keyPath, ".") + for _, component := range components { + val = reflect.Indirect(val) + switch val.Kind() { + case reflect.Map: + val = val.MapIndex(reflect.ValueOf(component)) + if val.Kind() == reflect.Interface { + val = reflect.ValueOf(val.Interface()) + } + case reflect.Struct: + val = val.FieldByName(component) + default: + return reflect.Value{}, false + } + if (val == reflect.Value{}) { + return reflect.Value{}, false + } + } + + return val, true +} + +func (f GinkgoFlagSet) usageForSection(section GinkgoFlagSection) string { + out := formatter.F(section.Style + "{{bold}}{{underline}}" + section.Heading + "{{/}}\n") + if section.Description != "" { + out += formatter.Fiw(0, formatter.COLS, section.Description+"\n") + } + return out +} + +func (f GinkgoFlagSet) usageForFlag(flag GinkgoFlag, style string) string { + argument := flag.UsageArgument + defValue := flag.UsageDefaultValue + if argument == "" { + value, _ := valueAtKeyPath(f.bindings, flag.KeyPath) + switch value.Type() { + case reflect.TypeOf(string("")): + argument = "string" + case reflect.TypeOf(int64(0)), reflect.TypeOf(int(0)): + argument = "int" + case reflect.TypeOf(time.Duration(0)): + argument = "duration" + case reflect.TypeOf(float64(0)): + argument = "float" + case reflect.TypeOf([]string{}): + argument = "string" + } + } + if argument != "" { + argument = "[" + argument + "] " + } + if defValue != "" { + defValue = fmt.Sprintf("(default: %s)", defValue) + } + hyphens := "--" + if len(flag.Name) == 1 { 
+ hyphens = "-" + } + + out := formatter.Fi(1, style+"%s%s{{/}} %s{{gray}}%s{{/}}\n", hyphens, flag.Name, argument, defValue) + out += formatter.Fiw(2, formatter.COLS, "{{light-gray}}%s{{/}}\n", flag.Usage) + return out +} + +func (f GinkgoFlagSet) usageForGoFlag(goFlag *flag.Flag) string { + //Taken directly from the flag package + out := fmt.Sprintf(" -%s", goFlag.Name) + name, usage := flag.UnquoteUsage(goFlag) + if len(name) > 0 { + out += " " + name + } + if len(out) <= 4 { + out += "\t" + } else { + out += "\n \t" + } + out += strings.ReplaceAll(usage, "\n", "\n \t") + out += "\n" + return out +} + +type stringSliceVar struct { + slice reflect.Value +} + +func (ssv stringSliceVar) String() string { return "" } +func (ssv stringSliceVar) Set(s string) error { + ssv.slice.Set(reflect.AppendSlice(ssv.slice, reflect.ValueOf([]string{s}))) + return nil +} + +//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. +func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { + result := []string{} + for _, flag := range flags { + name := flag.ExportAs + if name == "" { + name = flag.Name + } + if name == "" { + continue + } + + value, ok := valueAtKeyPath(bindings, flag.KeyPath) + if !ok { + return []string{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath) + } + + iface := value.Interface() + switch value.Type() { + case reflect.TypeOf(string("")): + if iface.(string) != "" { + result = append(result, fmt.Sprintf("--%s=%s", name, iface)) + } + case reflect.TypeOf(int64(0)): + if iface.(int64) != 0 { + result = append(result, fmt.Sprintf("--%s=%d", name, iface)) + } + case reflect.TypeOf(float64(0)): + if iface.(float64) != 0 { + result = append(result, fmt.Sprintf("--%s=%f", name, iface)) + } + case reflect.TypeOf(int(0)): + if iface.(int) != 0 { + result = append(result, fmt.Sprintf("--%s=%d", name, iface)) + } + case reflect.TypeOf(bool(true)): + if iface.(bool) { + result = append(result, fmt.Sprintf("--%s", name)) + } + case reflect.TypeOf(time.Duration(0)): + if iface.(time.Duration) != time.Duration(0) { + result = append(result, fmt.Sprintf("--%s=%s", name, iface)) + } + + case reflect.TypeOf([]string{}): + strings := iface.([]string) + for _, s := range strings { + result = append(result, fmt.Sprintf("--%s=%s", name, s)) + } + default: + return []string{}, fmt.Errorf("unsupported type %T", iface) + } + } + + return result, nil +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/src/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go new file mode 100644 index 00000000..0403f9e6 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -0,0 +1,347 @@ +package types + +import ( + "fmt" + "regexp" + "strings" +) + +var DEBUG_LABEL_FILTER_PARSING = false + +type LabelFilter func([]string) bool + +func matchLabelAction(label string) LabelFilter { + expected := strings.ToLower(label) + return func(labels []string) bool { + for i := range labels { + if strings.ToLower(labels[i]) == expected { + return true + } + } + return false + } +} + +func matchLabelRegexAction(regex *regexp.Regexp) LabelFilter { + return func(labels []string) bool { + for i := range labels { + if regex.MatchString(labels[i]) { + return true + } + } + return false + } +} + +func notAction(filter LabelFilter) LabelFilter { + return func(labels []string) bool { return !filter(labels) } +} + +func andAction(a, b LabelFilter) LabelFilter { + return 
func(labels []string) bool { return a(labels) && b(labels) } +} + +func orAction(a, b LabelFilter) LabelFilter { + return func(labels []string) bool { return a(labels) || b(labels) } +} + +type lfToken uint + +const ( + lfTokenInvalid lfToken = iota + + lfTokenRoot + lfTokenOpenGroup + lfTokenCloseGroup + lfTokenNot + lfTokenAnd + lfTokenOr + lfTokenRegexp + lfTokenLabel + lfTokenEOF +) + +func (l lfToken) Precedence() int { + switch l { + case lfTokenRoot, lfTokenOpenGroup: + return 0 + case lfTokenOr: + return 1 + case lfTokenAnd: + return 2 + case lfTokenNot: + return 3 + } + return -1 +} + +func (l lfToken) String() string { + switch l { + case lfTokenRoot: + return "ROOT" + case lfTokenOpenGroup: + return "(" + case lfTokenCloseGroup: + return ")" + case lfTokenNot: + return "!" + case lfTokenAnd: + return "&&" + case lfTokenOr: + return "||" + case lfTokenRegexp: + return "/regexp/" + case lfTokenLabel: + return "label" + case lfTokenEOF: + return "EOF" + } + return "INVALID" +} + +type treeNode struct { + token lfToken + location int + value string + + parent *treeNode + leftNode *treeNode + rightNode *treeNode +} + +func (tn *treeNode) setRightNode(node *treeNode) { + tn.rightNode = node + node.parent = tn +} + +func (tn *treeNode) setLeftNode(node *treeNode) { + tn.leftNode = node + node.parent = tn +} + +func (tn *treeNode) firstAncestorWithPrecedenceLEQ(precedence int) *treeNode { + if tn.token.Precedence() <= precedence { + return tn + } + return tn.parent.firstAncestorWithPrecedenceLEQ(precedence) +} + +func (tn *treeNode) firstUnmatchedOpenNode() *treeNode { + if tn.token == lfTokenOpenGroup { + return tn + } + if tn.parent == nil { + return nil + } + return tn.parent.firstUnmatchedOpenNode() +} + +func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) { + switch tn.token { + case lfTokenOpenGroup: + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, "Mismatched '(' - could not find matching ')'.") + case lfTokenLabel: + return matchLabelAction(tn.value), nil + case lfTokenRegexp: + re, err := regexp.Compile(tn.value) + if err != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) + } + return matchLabelRegexAction(re), nil + } + + if tn.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, -1, "Unexpected EOF.") + } + rightLF, err := tn.rightNode.constructLabelFilter(input) + if err != nil { + return nil, err + } + + switch tn.token { + case lfTokenRoot, lfTokenCloseGroup: + return rightLF, nil + case lfTokenNot: + return notAction(rightLF), nil + } + + if tn.leftNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Malformed tree - '%s' is missing left operand.", tn.token)) + } + leftLF, err := tn.leftNode.constructLabelFilter(input) + if err != nil { + return nil, err + } + + switch tn.token { + case lfTokenAnd: + return andAction(leftLF, rightLF), nil + case lfTokenOr: + return orAction(leftLF, rightLF), nil + } + + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Invalid token '%s'.", tn.token)) +} + +func (tn *treeNode) tokenString() string { + out := fmt.Sprintf("<%s", tn.token) + if tn.value != "" { + out += " | " + tn.value + } + out += ">" + return out +} + +func (tn *treeNode) toString(indent int) string { + out := tn.tokenString() + "\n" + if tn.leftNode != nil { + out += fmt.Sprintf("%s |_(L)_%s", strings.Repeat(" ", 
indent), tn.leftNode.toString(indent+1)) + } + if tn.rightNode != nil { + out += fmt.Sprintf("%s |_(R)_%s", strings.Repeat(" ", indent), tn.rightNode.toString(indent+1)) + } + return out +} + +func tokenize(input string) func() (*treeNode, error) { + runes, i := []rune(input), 0 + + peekIs := func(r rune) bool { + if i+1 < len(runes) { + return runes[i+1] == r + } + return false + } + + consumeUntil := func(cutset string) (string, int) { + j := i + for ; j < len(runes); j++ { + if strings.IndexRune(cutset, runes[j]) >= 0 { + break + } + } + return string(runes[i:j]), j - i + } + + return func() (*treeNode, error) { + for i < len(runes) && runes[i] == ' ' { + i += 1 + } + + if i >= len(runes) { + return &treeNode{token: lfTokenEOF}, nil + } + + node := &treeNode{location: i} + switch runes[i] { + case '&': + if !peekIs('&') { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '&'. Did you mean '&&'?") + } + i += 2 + node.token = lfTokenAnd + case '|': + if !peekIs('|') { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '|'. Did you mean '||'?") + } + i += 2 + node.token = lfTokenOr + case '!': + i += 1 + node.token = lfTokenNot + case ',': + i += 1 + node.token = lfTokenOr + case '(': + i += 1 + node.token = lfTokenOpenGroup + case ')': + i += 1 + node.token = lfTokenCloseGroup + case '/': + i += 1 + value, n := consumeUntil("/") + i += n + 1 + node.token, node.value = lfTokenRegexp, value + default: + value, n := consumeUntil("&|!,()/") + i += n + node.token, node.value = lfTokenLabel, strings.TrimSpace(value) + } + return node, nil + } +} + +func ParseLabelFilter(input string) (LabelFilter, error) { + if DEBUG_LABEL_FILTER_PARSING { + fmt.Println("\n==============") + fmt.Println("Input: ", input) + fmt.Print("Tokens: ") + } + nextToken := tokenize(input) + + root := &treeNode{token: lfTokenRoot} + current := root +LOOP: + for { + node, err := nextToken() + if err != nil { + return nil, err + } + + if DEBUG_LABEL_FILTER_PARSING { + fmt.Print(node.tokenString() + " ") + } + + switch node.token { + case lfTokenEOF: + break LOOP + case lfTokenLabel, lfTokenRegexp: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. 
You need an operator between them.") + } + current.setRightNode(node) + case lfTokenNot, lfTokenOpenGroup: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Invalid token '%s'.", node.token)) + } + current.setRightNode(node) + current = node + case lfTokenAnd, lfTokenOr: + if current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Operator '%s' missing left hand operand.", node.token)) + } + nodeToStealFrom := current.firstAncestorWithPrecedenceLEQ(node.token.Precedence()) + node.setLeftNode(nodeToStealFrom.rightNode) + nodeToStealFrom.setRightNode(node) + current = node + case lfTokenCloseGroup: + firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() + if firstUnmatchedOpenNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Mismatched ')' - could not find matching '('.") + } + if firstUnmatchedOpenNode == current && current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found empty '()' group.") + } + firstUnmatchedOpenNode.token = lfTokenCloseGroup //signify the group is now closed + current = firstUnmatchedOpenNode.parent + default: + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unknown token '%s'.", node.token)) + } + } + if DEBUG_LABEL_FILTER_PARSING { + fmt.Printf("\n Tree:\n%s", root.toString(0)) + } + return root.constructLabelFilter(input) +} + +func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) { + out := strings.TrimSpace(label) + if out == "" { + return "", GinkgoErrors.InvalidEmptyLabel(cl) + } + if strings.ContainsAny(out, "&|!,()/") { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + return out, nil +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/src/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go new file mode 100644 index 00000000..c64866c6 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go @@ -0,0 +1,185 @@ +package types + +import ( + "encoding/json" + "fmt" + "time" +) + +//ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports +//and across the network connection when running in parallel +type ReportEntryValue struct { + raw interface{} //unexported to prevent gob from freaking out about unregistered structs + AsJSON string + Representation string +} + +func WrapEntryValue(value interface{}) ReportEntryValue { + return ReportEntryValue{ + raw: value, + } +} + +func (rev ReportEntryValue) GetRawValue() interface{} { + return rev.raw +} + +func (rev ReportEntryValue) String() string { + if rev.raw == nil { + return "" + } + if colorableStringer, ok := rev.raw.(ColorableStringer); ok { + return colorableStringer.ColorableString() + } + + if stringer, ok := rev.raw.(fmt.Stringer); ok { + return stringer.String() + } + if rev.Representation != "" { + return rev.Representation + } + return fmt.Sprintf("%+v", rev.raw) +} + +func (rev ReportEntryValue) MarshalJSON() ([]byte, error) { + //All this to capture the representation at encoding-time, not creating time + //This way users can Report on pointers and get their final values at reporting-time + out := struct { + AsJSON string + Representation string + }{ + Representation: rev.String(), + } + + asJSON, err := json.Marshal(rev.raw) + if err != nil { + return nil, err + } + out.AsJSON = string(asJSON) + + 
return json.Marshal(out) +} + +func (rev *ReportEntryValue) UnmarshalJSON(data []byte) error { + in := struct { + AsJSON string + Representation string + }{} + err := json.Unmarshal(data, &in) + if err != nil { + return err + } + rev.AsJSON = in.AsJSON + rev.Representation = in.Representation + return json.Unmarshal([]byte(in.AsJSON), &(rev.raw)) +} + +func (rev ReportEntryValue) GobEncode() ([]byte, error) { + return rev.MarshalJSON() +} + +func (rev *ReportEntryValue) GobDecode(data []byte) error { + return rev.UnmarshalJSON(data) +} + +// ReportEntry captures information attached to `SpecReport` via `AddReportEntry` +type ReportEntry struct { + // Visibility captures the visibility policy for this ReportEntry + Visibility ReportEntryVisibility + // Time captures the time the AddReportEntry was called + Time time.Time + // Location captures the location of the AddReportEntry call + Location CodeLocation + // Name captures the name of this report + Name string + // Value captures the (optional) object passed into AddReportEntry - this can be + // anything the user wants. The value passed to AddReportEntry is wrapped in a ReportEntryValue to make + // encoding/decoding the value easier. To access the raw value call entry.GetRawValue() + Value ReportEntryValue +} + +// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableStirng() is used to generate their representation. +type ColorableStringer interface { + ColorableString() string +} + +// StringRepresentation() returns the string representation of the value associated with the ReportEntry -- +// if value is nil, empty string is returned +// if value is a `ColorableStringer` then `Value.ColorableString()` is returned +// if value is a `fmt.Stringer` then `Value.String()` is returned +// otherwise the value is formatted with "%+v" +func (entry ReportEntry) StringRepresentation() string { + return entry.Value.String() +} + +// GetRawValue returns the Value object that was passed to AddReportEntry +// If called in-process this will be the same object that was passed into AddReportEntry. +// If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be +// a JSON-decoded {}interface. If you want to reconstitute your original object you can decode the entry.Value.AsJSON +// field yourself. +func (entry ReportEntry) GetRawValue() interface{} { + return entry.Value.GetRawValue() +} + +type ReportEntries []ReportEntry + +func (re ReportEntries) HasVisibility(visibilities ...ReportEntryVisibility) bool { + for _, entry := range re { + if entry.Visibility.Is(visibilities...) { + return true + } + } + return false +} + +func (re ReportEntries) WithVisibility(visibilities ...ReportEntryVisibility) ReportEntries { + out := ReportEntries{} + + for _, entry := range re { + if entry.Visibility.Is(visibilities...) { + out = append(out, entry) + } + } + + return out +} + +// ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter +type ReportEntryVisibility uint + +const ( + // Always print out this ReportEntry + ReportEntryVisibilityAlways ReportEntryVisibility = iota + // Only print out this ReportEntry if the spec fails or if the test is run with -v + ReportEntryVisibilityFailureOrVerbose + // Never print out this ReportEntry (note that ReportEntrys are always encoded in machine readable reports (e.g. 
JSON, JUnit, etc.)) + ReportEntryVisibilityNever +) + +var revEnumSupport = NewEnumSupport(map[uint]string{ + uint(ReportEntryVisibilityAlways): "always", + uint(ReportEntryVisibilityFailureOrVerbose): "failure-or-verbose", + uint(ReportEntryVisibilityNever): "never", +}) + +func (rev ReportEntryVisibility) String() string { + return revEnumSupport.String(uint(rev)) +} +func (rev *ReportEntryVisibility) UnmarshalJSON(b []byte) error { + out, err := revEnumSupport.UnmarshJSON(b) + *rev = ReportEntryVisibility(out) + return err +} +func (rev ReportEntryVisibility) MarshalJSON() ([]byte, error) { + return revEnumSupport.MarshJSON(uint(rev)) +} + +func (v ReportEntryVisibility) Is(visibilities ...ReportEntryVisibility) bool { + for _, visibility := range visibilities { + if v == visibility { + return true + } + } + + return false +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/types/types.go b/src/vendor/github.com/onsi/ginkgo/v2/types/types.go new file mode 100644 index 00000000..f30d23c6 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -0,0 +1,544 @@ +package types + +import ( + "encoding/json" + "strings" + "time" +) + +const GINKGO_FOCUS_EXIT_CODE = 197 +const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999" + +// Report captures information about a Ginkgo test run +type Report struct { + //SuitePath captures the absolute path to the test suite + SuitePath string + + //SuiteDescription captures the description string passed to the DSL's RunSpecs() function + SuiteDescription string + + //SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function + SuiteLabels []string + + //SuiteSucceeded captures the success or failure status of the test run + //If true, the test run is considered successful. + //If false, the test run is considered unsuccessful + SuiteSucceeded bool + + //SuiteHasProgrammaticFocus captures whether the test suite has a test or set of tests that are programmatically focused + //(i.e an `FIt` or an `FDescribe` + SuiteHasProgrammaticFocus bool + + //SpecialSuiteFailureReasons may contain special failure reasons + //For example, a test suite might be considered "failed" even if none of the individual specs + //have a failure state. For example, if the user has configured --fail-on-pending the test suite + //will have failed if there are pending tests even though all non-pending tests may have passed. In such + //cases, Ginkgo populates SpecialSuiteFailureReasons with a clear message indicating the reason for the failure. + //SpecialSuiteFailureReasons is also populated if the test suite is interrupted by the user. + //Since multiple special failure reasons can occur, this field is a slice. + SpecialSuiteFailureReasons []string + + //PreRunStats contains a set of stats captured before the test run begins. This is primarily used + //by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) + //and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. 
+ PreRunStats PreRunStats + + //StartTime and EndTime capture the start and end time of the test run + StartTime time.Time + EndTime time.Time + + //RunTime captures the duration of the test run + RunTime time.Duration + + //SuiteConfig captures the Ginkgo configuration governing this test run + //SuiteConfig includes information necessary for reproducing an identical test run, + //such as the random seed and any filters applied during the test run + SuiteConfig SuiteConfig + + //SpecReports is a list of all SpecReports generated by this test run + SpecReports SpecReports +} + +//PreRunStats contains a set of stats captured before the test run begins. This is primarily used +//by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) +//and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. +type PreRunStats struct { + TotalSpecs int + SpecsThatWillRun int +} + +//Add is ued by Ginkgo's parallel aggregation mechanisms to combine test run reports form individual parallel processes +//to form a complete final report. +func (report Report) Add(other Report) Report { + report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded + + if other.StartTime.Before(report.StartTime) { + report.StartTime = other.StartTime + } + + if other.EndTime.After(report.EndTime) { + report.EndTime = other.EndTime + } + + specialSuiteFailureReasons := []string{} + reasonsLookup := map[string]bool{} + for _, reasons := range [][]string{report.SpecialSuiteFailureReasons, other.SpecialSuiteFailureReasons} { + for _, reason := range reasons { + if !reasonsLookup[reason] { + reasonsLookup[reason] = true + specialSuiteFailureReasons = append(specialSuiteFailureReasons, reason) + } + } + } + report.SpecialSuiteFailureReasons = specialSuiteFailureReasons + report.RunTime = report.EndTime.Sub(report.StartTime) + + reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports)) + for i := range report.SpecReports { + reports[i] = report.SpecReports[i] + } + offset := len(report.SpecReports) + for i := range other.SpecReports { + reports[i+offset] = other.SpecReports[i] + } + + report.SpecReports = reports + return report +} + +// SpecReport captures information about a Ginkgo spec. +type SpecReport struct { + // ContainerHierarchyTexts is a slice containing the text strings of + // all Describe/Context/When containers in this spec's hierarchy. + ContainerHierarchyTexts []string + + // ContainerHierarchyLocations is a slice containing the CodeLocations of + // all Describe/Context/When containers in this spec's hierarchy. + ContainerHierarchyLocations []CodeLocation + + // ContainerHierarchyLabels is a slice containing the labels of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchyLabels [][]string + + // LeafNodeType, LeadNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, and text + // of the Ginkgo node being tested (typically an NodeTypeIt node, though this can also be + // one of the NodeTypesForSuiteLevelNodes node types) + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeText string + + // State captures whether the spec has passed, failed, etc. 
+ State SpecState + + // IsSerial captures whether the spec has the Serial decorator + IsSerial bool + + // IsInOrderedContainer captures whether the spec appears in an Ordered container + IsInOrderedContainer bool + + // StartTime and EndTime capture the start and end time of the spec + StartTime time.Time + EndTime time.Time + + // RunTime captures the duration of the spec + RunTime time.Duration + + // ParallelProcess captures the parallel process that this spec ran on + ParallelProcess int + + //Failure is populated if a spec has failed, panicked, been interrupted, or skipped by the user (e.g. calling Skip()) + //It includes detailed information about the Failure + Failure Failure + + // NumAttempts captures the number of times this Spec was run. Flakey specs can be retried with + // ginkgo --flake-attempts=N + NumAttempts int + + // CapturedGinkgoWriterOutput contains text printed to the GinkgoWriter + CapturedGinkgoWriterOutput string + + // CapturedStdOutErr contains text printed to stdout/stderr (when running in parallel) + // This is always empty when running in series or calling CurrentSpecReport() + // It is used internally by Ginkgo's reporter + CapturedStdOutErr string + + // ReportEntries contains any reports added via `AddReportEntry` + ReportEntries ReportEntries +} + +func (report SpecReport) MarshalJSON() ([]byte, error) { + //All this to avoid emitting an empty Failure struct in the JSON + out := struct { + ContainerHierarchyTexts []string + ContainerHierarchyLocations []CodeLocation + ContainerHierarchyLabels [][]string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeText string + State SpecState + StartTime time.Time + EndTime time.Time + RunTime time.Duration + ParallelProcess int + Failure *Failure `json:",omitempty"` + NumAttempts int + CapturedGinkgoWriterOutput string `json:",omitempty"` + CapturedStdOutErr string `json:",omitempty"` + ReportEntries ReportEntries `json:",omitempty"` + }{ + ContainerHierarchyTexts: report.ContainerHierarchyTexts, + ContainerHierarchyLocations: report.ContainerHierarchyLocations, + ContainerHierarchyLabels: report.ContainerHierarchyLabels, + LeafNodeType: report.LeafNodeType, + LeafNodeLocation: report.LeafNodeLocation, + LeafNodeLabels: report.LeafNodeLabels, + LeafNodeText: report.LeafNodeText, + State: report.State, + StartTime: report.StartTime, + EndTime: report.EndTime, + RunTime: report.RunTime, + ParallelProcess: report.ParallelProcess, + Failure: nil, + ReportEntries: nil, + NumAttempts: report.NumAttempts, + CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, + CapturedStdOutErr: report.CapturedStdOutErr, + } + + if !report.Failure.IsZero() { + out.Failure = &(report.Failure) + } + if len(report.ReportEntries) > 0 { + out.ReportEntries = report.ReportEntries + } + + return json.Marshal(out) +} + +// CombinedOutput returns a single string representation of both CapturedStdOutErr and CapturedGinkgoWriterOutput +// Note that both are empty when using CurrentSpecReport() so CurrentSpecReport().CombinedOutput() will always be empty. +// CombinedOutput() is used internally by Ginkgo's reporter. 
+func (report SpecReport) CombinedOutput() string { + if report.CapturedStdOutErr == "" { + return report.CapturedGinkgoWriterOutput + } + if report.CapturedGinkgoWriterOutput == "" { + return report.CapturedStdOutErr + } + return report.CapturedStdOutErr + "\n" + report.CapturedGinkgoWriterOutput +} + +//Failed returns true if report.State is one of the SpecStateFailureStates +// (SpecStateFailed, SpecStatePanicked, SpecStateinterrupted, SpecStateAborted) +func (report SpecReport) Failed() bool { + return report.State.Is(SpecStateFailureStates) +} + +//FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText +func (report SpecReport) FullText() string { + texts := []string{} + texts = append(texts, report.ContainerHierarchyTexts...) + if report.LeafNodeText != "" { + texts = append(texts, report.LeafNodeText) + } + return strings.Join(texts, " ") +} + +//Labels returns a deduped set of all the spec's Labels. +func (report SpecReport) Labels() []string { + out := []string{} + seen := map[string]bool{} + for _, labels := range report.ContainerHierarchyLabels { + for _, label := range labels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + } + for _, label := range report.LeafNodeLabels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + + return out +} + +//MatchesLabelFilter returns true if the spec satisfies the passed in label filter query +func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { + filter, err := ParseLabelFilter(query) + if err != nil { + return false, err + } + return filter(report.Labels()), nil +} + +//FileName() returns the name of the file containing the spec +func (report SpecReport) FileName() string { + return report.LeafNodeLocation.FileName +} + +//LineNumber() returns the line number of the leaf node +func (report SpecReport) LineNumber() int { + return report.LeafNodeLocation.LineNumber +} + +//FailureMessage() returns the failure message (or empty string if the test hasn't failed) +func (report SpecReport) FailureMessage() string { + return report.Failure.Message +} + +//FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed) +func (report SpecReport) FailureLocation() CodeLocation { + return report.Failure.Location +} + +type SpecReports []SpecReport + +//WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes +func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports { + count := 0 + for i := range reports { + if reports[i].LeafNodeType.Is(nodeTypes) { + count++ + } + } + + out := make(SpecReports, count) + j := 0 + for i := range reports { + if reports[i].LeafNodeType.Is(nodeTypes) { + out[j] = reports[i] + j++ + } + } + return out +} + +//WithState returns the subset of SpecReports with State matching one of the requested SpecStates +func (reports SpecReports) WithState(states SpecState) SpecReports { + count := 0 + for i := range reports { + if reports[i].State.Is(states) { + count++ + } + } + + out, j := make(SpecReports, count), 0 + for i := range reports { + if reports[i].State.Is(states) { + out[j] = reports[i] + j++ + } + } + return out +} + +//CountWithState returns the number of SpecReports with State matching one of the requested SpecStates +func (reports SpecReports) CountWithState(states SpecState) int { + n := 0 + for i := range reports { + if reports[i].State.Is(states) { + n += 1 + } + } + return n +} + 
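A minimal sketch of how a consumer of this vendored package (for example a tool that rehydrates a types.Report from Ginkgo's JSON output, or a custom reporter) might use the SpecReports helpers defined above. The summarize function, its "smoke && !slow" label query, and the main wrapper are illustrative assumptions, not part of the Ginkgo API; the WithState, CountWithState, FullText, Failed, and MatchesLabelFilter calls are the ones vendored in this file.

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

// summarize prints a short failure summary for a completed run.
// (Illustrative sketch only; not part of the vendored sources.)
func summarize(report types.Report) error {
	// Count every spec that ended in one of the failure states.
	failedCount := report.SpecReports.CountWithState(types.SpecStateFailureStates)
	fmt.Printf("%d of %d specs failed\n", failedCount, len(report.SpecReports))

	// List the full Describe/Context/It text of each failed spec.
	for _, spec := range report.SpecReports.WithState(types.SpecStateFailureStates) {
		fmt.Printf("  FAILED: %s (%s)\n", spec.FullText(), spec.Failure.Message)
	}

	// Label filter queries parsed by ParseLabelFilter can also be applied per spec.
	for _, spec := range report.SpecReports {
		matches, err := spec.MatchesLabelFilter("smoke && !slow")
		if err != nil {
			return err
		}
		if matches && spec.Failed() {
			fmt.Printf("  smoke-test failure: %s\n", spec.FullText())
		}
	}
	return nil
}

func main() {
	// A zero-value Report keeps the sketch self-contained and runnable;
	// in practice the Report would come from Ginkgo or a rehydrated JSON file.
	_ = summarize(types.Report{})
}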
+//CountWithState returns the number of SpecReports that passed after multiple attempts +func (reports SpecReports) CountOfFlakedSpecs() int { + n := 0 + for i := range reports { + if reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 { + n += 1 + } + } + return n +} + +// Failure captures failure information for an individual test +type Failure struct { + // Message - the failure message passed into Fail(...). When using a matcher library + // like Gomega, this will contain the failure message generated by Gomega. + // + // Message is also populated if the user has called Skip(...). + Message string + + // Location - the CodeLocation where the failure occurred + // This CodeLocation will include a fully-populated StackTrace + Location CodeLocation + + // ForwardedPanic - if the failure represents a captured panic (i.e. Summary.State == SpecStatePanicked) + // then ForwardedPanic will be populated with a string representation of the captured panic. + ForwardedPanic string `json:",omitempty"` + + // FailureNodeContext - one of three contexts describing the node in which the failure occurred: + // FailureNodeIsLeafNode means the failure occurred in the leaf node of the associated SpecReport. None of the other FailureNode fields will be populated + // FailureNodeAtTopLevel means the failure occurred in a non-leaf node that is defined at the top-level of the spec (i.e. not in a container). FailureNodeType and FailureNodeLocation will be populated. + // FailureNodeInContainer means the failure occurred in a non-leaf node that is defined within a container. FailureNodeType, FailureNodeLocaiton, and FailureNodeContainerIndex will be populated. + // + // FailureNodeType will contain the NodeType of the node in which the failure occurred. + // FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred. + // If populated, FailureNodeContainerIndex will be the index into SpecReport.ContainerHierarchyTexts and SpecReport.ContainerHierarchyLocations that represents the parent container of the node in which the failure occurred. 
+ FailureNodeContext FailureNodeContext + FailureNodeType NodeType + FailureNodeLocation CodeLocation + FailureNodeContainerIndex int +} + +func (f Failure) IsZero() bool { + return f == Failure{} +} + +// FailureNodeContext captures the location context for the node containing the failing line of code +type FailureNodeContext uint + +const ( + FailureNodeContextInvalid FailureNodeContext = iota + + FailureNodeIsLeafNode + FailureNodeAtTopLevel + FailureNodeInContainer +) + +var fncEnumSupport = NewEnumSupport(map[uint]string{ + uint(FailureNodeContextInvalid): "INVALID FAILURE NODE CONTEXT", + uint(FailureNodeIsLeafNode): "leaf-node", + uint(FailureNodeAtTopLevel): "top-level", + uint(FailureNodeInContainer): "in-container", +}) + +func (fnc FailureNodeContext) String() string { + return fncEnumSupport.String(uint(fnc)) +} +func (fnc *FailureNodeContext) UnmarshalJSON(b []byte) error { + out, err := fncEnumSupport.UnmarshJSON(b) + *fnc = FailureNodeContext(out) + return err +} +func (fnc FailureNodeContext) MarshalJSON() ([]byte, error) { + return fncEnumSupport.MarshJSON(uint(fnc)) +} + +// SpecState captures the state of a spec +// To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)` +type SpecState uint + +const ( + SpecStateInvalid SpecState = 0 + + SpecStatePending SpecState = 1 << iota + SpecStateSkipped + SpecStatePassed + SpecStateFailed + SpecStateAborted + SpecStatePanicked + SpecStateInterrupted +) + +var ssEnumSupport = NewEnumSupport(map[uint]string{ + uint(SpecStateInvalid): "INVALID SPEC STATE", + uint(SpecStatePending): "pending", + uint(SpecStateSkipped): "skipped", + uint(SpecStatePassed): "passed", + uint(SpecStateFailed): "failed", + uint(SpecStateAborted): "aborted", + uint(SpecStatePanicked): "panicked", + uint(SpecStateInterrupted): "interrupted", +}) + +func (ss SpecState) String() string { + return ssEnumSupport.String(uint(ss)) +} +func (ss *SpecState) UnmarshalJSON(b []byte) error { + out, err := ssEnumSupport.UnmarshJSON(b) + *ss = SpecState(out) + return err +} +func (ss SpecState) MarshalJSON() ([]byte, error) { + return ssEnumSupport.MarshJSON(uint(ss)) +} + +var SpecStateFailureStates = SpecStateFailed | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted + +func (ss SpecState) Is(states SpecState) bool { + return ss&states != 0 +} + +// NodeType captures the type of a given Ginkgo Node +type NodeType uint + +const ( + NodeTypeInvalid NodeType = 0 + + NodeTypeContainer NodeType = 1 << iota + NodeTypeIt + + NodeTypeBeforeEach + NodeTypeJustBeforeEach + NodeTypeAfterEach + NodeTypeJustAfterEach + + NodeTypeBeforeAll + NodeTypeAfterAll + + NodeTypeBeforeSuite + NodeTypeSynchronizedBeforeSuite + NodeTypeAfterSuite + NodeTypeSynchronizedAfterSuite + + NodeTypeReportBeforeEach + NodeTypeReportAfterEach + NodeTypeReportAfterSuite + + NodeTypeCleanupInvalid + NodeTypeCleanupAfterEach + NodeTypeCleanupAfterAll + NodeTypeCleanupAfterSuite +) + +var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt +var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite + +var ntEnumSupport = NewEnumSupport(map[uint]string{ + uint(NodeTypeInvalid): "INVALID NODE TYPE", + uint(NodeTypeContainer): "Container", + uint(NodeTypeIt): "It", + uint(NodeTypeBeforeEach): "BeforeEach", + uint(NodeTypeJustBeforeEach): "JustBeforeEach", + uint(NodeTypeAfterEach): "AfterEach", + 
uint(NodeTypeJustAfterEach): "JustAfterEach", + uint(NodeTypeBeforeAll): "BeforeAll", + uint(NodeTypeAfterAll): "AfterAll", + uint(NodeTypeBeforeSuite): "BeforeSuite", + uint(NodeTypeSynchronizedBeforeSuite): "SynchronizedBeforeSuite", + uint(NodeTypeAfterSuite): "AfterSuite", + uint(NodeTypeSynchronizedAfterSuite): "SynchronizedAfterSuite", + uint(NodeTypeReportBeforeEach): "ReportBeforeEach", + uint(NodeTypeReportAfterEach): "ReportAfterEach", + uint(NodeTypeReportAfterSuite): "ReportAfterSuite", + uint(NodeTypeCleanupInvalid): "INVALID CLEANUP NODE", + uint(NodeTypeCleanupAfterEach): "DeferCleanup", + uint(NodeTypeCleanupAfterAll): "DeferCleanup (All)", + uint(NodeTypeCleanupAfterSuite): "DeferCleanup (Suite)", +}) + +func (nt NodeType) String() string { + return ntEnumSupport.String(uint(nt)) +} +func (nt *NodeType) UnmarshalJSON(b []byte) error { + out, err := ntEnumSupport.UnmarshJSON(b) + *nt = NodeType(out) + return err +} +func (nt NodeType) MarshalJSON() ([]byte, error) { + return ntEnumSupport.MarshJSON(uint(nt)) +} + +func (nt NodeType) Is(nodeTypes NodeType) bool { + return nt&nodeTypes != 0 +} diff --git a/src/vendor/github.com/onsi/ginkgo/v2/types/version.go b/src/vendor/github.com/onsi/ginkgo/v2/types/version.go new file mode 100644 index 00000000..f4015031 --- /dev/null +++ b/src/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -0,0 +1,3 @@ +package types + +const VERSION = "2.1.4" diff --git a/src/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/src/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index d61301b1..11f4fc36 100644 --- a/src/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/src/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -1,3 +1,7 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package inspector // This file defines func typeOf(ast.Node) uint64. @@ -5,7 +9,11 @@ package inspector // The initial map-based implementation was too slow; // see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196 -import "go/ast" +import ( + "go/ast" + + "golang.org/x/tools/internal/typeparams" +) const ( nArrayType = iota @@ -43,6 +51,7 @@ const ( nImportSpec nIncDecStmt nIndexExpr + nIndexListExpr nInterfaceType nKeyValueExpr nLabeledStmt @@ -160,6 +169,8 @@ func typeOf(n ast.Node) uint64 { return 1 << nIncDecStmt case *ast.IndexExpr: return 1 << nIndexExpr + case *typeparams.IndexListExpr: + return 1 << nIndexListExpr case *ast.InterfaceType: return 1 << nInterfaceType case *ast.KeyValueExpr: diff --git a/src/vendor/golang.org/x/tools/internal/typeparams/common.go b/src/vendor/golang.org/x/tools/internal/typeparams/common.go new file mode 100644 index 00000000..ab6b30b8 --- /dev/null +++ b/src/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -0,0 +1,180 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeparams contains common utilities for writing tools that interact +// with generic Go code, as introduced with Go 1.18. +// +// Many of the types and functions in this package are proxies for the new APIs +// introduced in the standard library with Go 1.18. For example, the +// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec +// function returns the value of the go/ast.TypeSpec.TypeParams field. 
At Go +// versions older than 1.18 these helpers are implemented as stubs, allowing +// users of this package to write code that handles generic constructs inline, +// even if the Go version being used to compile does not support generics. +// +// Additionally, this package contains common utilities for working with the +// new generic constructs, to supplement the standard library APIs. Notably, +// the StructuralTerms API computes a minimal representation of the structural +// restrictions on a type parameter. In the future, this API may be available +// from go/types. +// +// See the example/README.md for a more detailed guide on how to update tools +// to support generics. +package typeparams + +import ( + "go/ast" + "go/token" + "go/types" +) + +// UnpackIndexExpr extracts data from AST nodes that represent index +// expressions. +// +// For an ast.IndexExpr, the resulting indices slice will contain exactly one +// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable +// number of index expressions. +// +// For nodes that don't represent index expressions, the first return value of +// UnpackIndexExpr will be nil. +func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) { + switch e := n.(type) { + case *ast.IndexExpr: + return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack + case *IndexListExpr: + return e.X, e.Lbrack, e.Indices, e.Rbrack + } + return nil, token.NoPos, nil, token.NoPos +} + +// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on +// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0 +// will panic. +func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr { + switch len(indices) { + case 0: + panic("empty indices") + case 1: + return &ast.IndexExpr{ + X: x, + Lbrack: lbrack, + Index: indices[0], + Rbrack: rbrack, + } + default: + return &IndexListExpr{ + X: x, + Lbrack: lbrack, + Indices: indices, + Rbrack: rbrack, + } + } +} + +// IsTypeParam reports whether t is a type parameter. +func IsTypeParam(t types.Type) bool { + _, ok := t.(*TypeParam) + return ok +} + +// OriginMethod returns the origin method associated with the method fn. +// For methods on a non-generic receiver base type, this is just +// fn. However, for methods with a generic receiver, OriginMethod returns the +// corresponding method in the method set of the origin type. +// +// As a special case, if fn is not a method (has no receiver), OriginMethod +// returns fn. +func OriginMethod(fn *types.Func) *types.Func { + recv := fn.Type().(*types.Signature).Recv() + if recv == nil { + + return fn + } + base := recv.Type() + p, isPtr := base.(*types.Pointer) + if isPtr { + base = p.Elem() + } + named, isNamed := base.(*types.Named) + if !isNamed { + // Receiver is a *types.Interface. + return fn + } + if ForNamed(named).Len() == 0 { + // Receiver base has no type parameters, so we can avoid the lookup below. + return fn + } + orig := NamedTypeOrigin(named) + gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) + return gfn.(*types.Func) +} + +// GenericAssignableTo is a generalization of types.AssignableTo that +// implements the following rule for uninstantiated generic types: +// +// If V and T are generic named types, then V is considered assignable to T if, +// for every possible instantation of V[A_1, ..., A_N], the instantiation +// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N]. 
+// +// If T has structural constraints, they must be satisfied by V. +// +// For example, consider the following type declarations: +// +// type Interface[T any] interface { +// Accept(T) +// } +// +// type Container[T any] struct { +// Element T +// } +// +// func (c Container[T]) Accept(t T) { c.Element = t } +// +// In this case, GenericAssignableTo reports that instantiations of Container +// are assignable to the corresponding instantiation of Interface. +func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { + // If V and T are not both named, or do not have matching non-empty type + // parameter lists, fall back on types.AssignableTo. + + VN, Vnamed := V.(*types.Named) + TN, Tnamed := T.(*types.Named) + if !Vnamed || !Tnamed { + return types.AssignableTo(V, T) + } + + vtparams := ForNamed(VN) + ttparams := ForNamed(TN) + if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 { + return types.AssignableTo(V, T) + } + + // V and T have the same (non-zero) number of type params. Instantiate both + // with the type parameters of V. This must always succeed for V, and will + // succeed for T if and only if the type set of each type parameter of V is a + // subset of the type set of the corresponding type parameter of T, meaning + // that every instantiation of V corresponds to a valid instantiation of T. + + // Minor optimization: ensure we share a context across the two + // instantiations below. + if ctxt == nil { + ctxt = NewContext() + } + + var targs []types.Type + for i := 0; i < vtparams.Len(); i++ { + targs = append(targs, vtparams.At(i)) + } + + vinst, err := Instantiate(ctxt, V, targs, true) + if err != nil { + panic("type parameters should satisfy their own constraints") + } + + tinst, err := Instantiate(ctxt, T, targs, true) + if err != nil { + return false + } + + return types.AssignableTo(vinst, tinst) +} diff --git a/src/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go b/src/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go new file mode 100644 index 00000000..18212390 --- /dev/null +++ b/src/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go @@ -0,0 +1,12 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package typeparams + +// Enabled reports whether type parameters are enabled in the current build +// environment. +const Enabled = false diff --git a/src/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go b/src/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go new file mode 100644 index 00000000..d6714882 --- /dev/null +++ b/src/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go @@ -0,0 +1,15 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package typeparams + +// Note: this constant is in a separate file as this is the only acceptable +// diff between the <1.18 API of this package and the 1.18 API. + +// Enabled reports whether type parameters are enabled in the current build +// environment. 
+const Enabled = true diff --git a/src/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/src/vendor/golang.org/x/tools/internal/typeparams/normalize.go new file mode 100644 index 00000000..090f142a --- /dev/null +++ b/src/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -0,0 +1,216 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "errors" + "fmt" + "go/types" + "os" + "strings" +) + +//go:generate go run copytermlist.go + +const debug = false + +var ErrEmptyTypeSet = errors.New("empty type set") + +// StructuralTerms returns a slice of terms representing the normalized +// structural type restrictions of a type parameter, if any. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration +// type T[P interface{~int; m()}] int +// the structural restriction of the type parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// StructuralTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, StructuralTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the +// constraint interface is invalid, exceeds complexity bounds, or has an empty +// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet. +// +// StructuralTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func StructuralTerms(tparam *TypeParam) ([]*Term, error) { + constraint := tparam.Constraint() + if constraint == nil { + return nil, fmt.Errorf("%s has nil constraint", tparam) + } + iface, _ := constraint.Underlying().(*types.Interface) + if iface == nil { + return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) + } + return InterfaceTermSet(iface) +} + +// InterfaceTermSet computes the normalized terms for a constraint interface, +// returning an error if the term set cannot be computed or is empty. In the +// latter case, the error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. 
+func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { + return computeTermSet(iface) +} + +// UnionTermSet computes the normalized terms for a union, returning an error +// if the term set cannot be computed or is empty. In the latter case, the +// error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func UnionTermSet(union *Union) ([]*Term, error) { + return computeTermSet(union) +} + +func computeTermSet(typ types.Type) ([]*Term, error) { + tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) + if err != nil { + return nil, err + } + if tset.terms.isEmpty() { + return nil, ErrEmptyTypeSet + } + if tset.terms.isAll() { + return nil, nil + } + var terms []*Term + for _, term := range tset.terms { + terms = append(terms, NewTerm(term.tilde, term.typ)) + } + return terms, nil +} + +// A termSet holds the normalized set of terms for a given type. +// +// The name termSet is intentionally distinct from 'type set': a type set is +// all types that implement a type (and includes method restrictions), whereas +// a term set just represents the structural restrictions on a type. +type termSet struct { + complete bool + terms termlist +} + +func indentf(depth int, format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) +} + +func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { + if t == nil { + panic("nil type") + } + + if debug { + indentf(depth, "%s", t.String()) + defer func() { + if err != nil { + indentf(depth, "=> %s", err) + } else { + indentf(depth, "=> %s", res.terms.String()) + } + }() + } + + const maxTermCount = 100 + if tset, ok := seen[t]; ok { + if !tset.complete { + return nil, fmt.Errorf("cycle detected in the declaration of %s", t) + } + return tset, nil + } + + // Mark the current type as seen to avoid infinite recursion. + tset := new(termSet) + defer func() { + tset.complete = true + }() + seen[t] = tset + + switch u := t.Underlying().(type) { + case *types.Interface: + // The term set of an interface is the intersection of the term sets of its + // embedded types. + tset.terms = allTermlist + for i := 0; i < u.NumEmbeddeds(); i++ { + embedded := u.EmbeddedType(i) + if _, ok := embedded.Underlying().(*TypeParam); ok { + return nil, fmt.Errorf("invalid embedded type %T", embedded) + } + tset2, err := computeTermSetInternal(embedded, seen, depth+1) + if err != nil { + return nil, err + } + tset.terms = tset.terms.intersect(tset2.terms) + } + case *Union: + // The term set of a union is the union of term sets of its terms. + tset.terms = nil + for i := 0; i < u.Len(); i++ { + t := u.Term(i) + var terms termlist + switch t.Type().Underlying().(type) { + case *types.Interface: + tset2, err := computeTermSetInternal(t.Type(), seen, depth+1) + if err != nil { + return nil, err + } + terms = tset2.terms + case *TypeParam, *Union: + // A stand-alone type parameter or union is not permitted as union + // term. 
+ return nil, fmt.Errorf("invalid union term %T", t) + default: + if t.Type() == types.Typ[types.Invalid] { + continue + } + terms = termlist{{t.Tilde(), t.Type()}} + } + tset.terms = tset.terms.union(terms) + if len(tset.terms) > maxTermCount { + return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) + } + } + case *TypeParam: + panic("unreachable") + default: + // For all other types, the term set is just a single non-tilde term + // holding the type itself. + if u != types.Typ[types.Invalid] { + tset.terms = termlist{{false, t}} + } + } + return tset, nil +} + +// under is a facade for the go/types internal function of the same name. It is +// used by typeterm.go. +func under(t types.Type) types.Type { + return t.Underlying() +} diff --git a/src/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/src/vendor/golang.org/x/tools/internal/typeparams/termlist.go new file mode 100644 index 00000000..10857d50 --- /dev/null +++ b/src/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -0,0 +1,172 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. + +package typeparams + +import ( + "bytes" + "go/types" +) + +// A termlist represents the type set represented by the union +// t1 βˆͺ y2 βˆͺ ... tn of the type sets of the terms t1 to tn. +// A termlist is in normal form if all terms are disjoint. +// termlist operations don't require the operands to be in +// normal form. +type termlist []*term + +// allTermlist represents the set of all types. +// It is in normal form. +var allTermlist = termlist{new(term)} + +// String prints the termlist exactly (without normalization). +func (xl termlist) String() string { + if len(xl) == 0 { + return "βˆ…" + } + var buf bytes.Buffer + for i, x := range xl { + if i > 0 { + buf.WriteString(" βˆͺ ") + } + buf.WriteString(x.String()) + } + return buf.String() +} + +// isEmpty reports whether the termlist xl represents the empty set of types. +func (xl termlist) isEmpty() bool { + // If there's a non-nil term, the entire list is not empty. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil { + return false + } + } + return true +} + +// isAll reports whether the termlist xl represents the set of all types. +func (xl termlist) isAll() bool { + // If there's a 𝓀 term, the entire list is 𝓀. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil && x.typ == nil { + return true + } + } + return false +} + +// norm returns the normal form of xl. +func (xl termlist) norm() termlist { + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + used := make([]bool, len(xl)) + var rl termlist + for i, xi := range xl { + if xi == nil || used[i] { + continue + } + for j := i + 1; j < len(xl); j++ { + xj := xl[j] + if xj == nil || used[j] { + continue + } + if u1, u2 := xi.union(xj); u2 == nil { + // If we encounter a 𝓀 term, the entire list is 𝓀. + // Exit early. + // (Note that this is not just an optimization; + // if we continue, we may end up with a 𝓀 term + // and other terms and the result would not be + // in normal form.) 
+ if u1.typ == nil { + return allTermlist + } + xi = u1 + used[j] = true // xj is now unioned into xi - ignore it in future iterations + } + } + rl = append(rl, xi) + } + return rl +} + +// If the type set represented by xl is specified by a single (non-𝓀) term, +// structuralType returns that type. Otherwise it returns nil. +func (xl termlist) structuralType() types.Type { + if nl := xl.norm(); len(nl) == 1 { + return nl[0].typ // if nl.isAll() then typ is nil, which is ok + } + return nil +} + +// union returns the union xl βˆͺ yl. +func (xl termlist) union(yl termlist) termlist { + return append(xl, yl...).norm() +} + +// intersect returns the intersection xl ∩ yl. +func (xl termlist) intersect(yl termlist) termlist { + if xl.isEmpty() || yl.isEmpty() { + return nil + } + + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + var rl termlist + for _, x := range xl { + for _, y := range yl { + if r := x.intersect(y); r != nil { + rl = append(rl, r) + } + } + } + return rl.norm() +} + +// equal reports whether xl and yl represent the same type set. +func (xl termlist) equal(yl termlist) bool { + // TODO(gri) this should be more efficient + return xl.subsetOf(yl) && yl.subsetOf(xl) +} + +// includes reports whether t ∈ xl. +func (xl termlist) includes(t types.Type) bool { + for _, x := range xl { + if x.includes(t) { + return true + } + } + return false +} + +// supersetOf reports whether y βŠ† xl. +func (xl termlist) supersetOf(y *term) bool { + for _, x := range xl { + if y.subsetOf(x) { + return true + } + } + return false +} + +// subsetOf reports whether xl βŠ† yl. +func (xl termlist) subsetOf(yl termlist) bool { + if yl.isEmpty() { + return xl.isEmpty() + } + + // each term x of xl must be a subset of yl + for _, x := range xl { + if !yl.supersetOf(x) { + return false // x is not a subset yl + } + } + return true +} diff --git a/src/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/src/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go new file mode 100644 index 00000000..b4788978 --- /dev/null +++ b/src/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go @@ -0,0 +1,197 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package typeparams + +import ( + "go/ast" + "go/token" + "go/types" +) + +func unsupported() { + panic("type parameters are unsupported at this go version") +} + +// IndexListExpr is a placeholder type, as type parameters are not supported at +// this Go version. Its methods panic on use. +type IndexListExpr struct { + ast.Expr + X ast.Expr // expression + Lbrack token.Pos // position of "[" + Indices []ast.Expr // index expressions + Rbrack token.Pos // position of "]" +} + +// ForTypeSpec returns an empty field list, as type parameters on not supported +// at this Go version. +func ForTypeSpec(*ast.TypeSpec) *ast.FieldList { + return nil +} + +// ForFuncType returns an empty field list, as type parameters are not +// supported at this Go version. +func ForFuncType(*ast.FuncType) *ast.FieldList { + return nil +} + +// TypeParam is a placeholder type, as type parameters are not supported at +// this Go version. Its methods panic on use. 
+type TypeParam struct{ types.Type } + +func (*TypeParam) Index() int { unsupported(); return 0 } +func (*TypeParam) Constraint() types.Type { unsupported(); return nil } +func (*TypeParam) Obj() *types.TypeName { unsupported(); return nil } + +// TypeParamList is a placeholder for an empty type parameter list. +type TypeParamList struct{} + +func (*TypeParamList) Len() int { return 0 } +func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil } + +// TypeList is a placeholder for an empty type list. +type TypeList struct{} + +func (*TypeList) Len() int { return 0 } +func (*TypeList) At(int) types.Type { unsupported(); return nil } + +// NewTypeParam is unsupported at this Go version, and panics. +func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { + unsupported() + return nil +} + +// SetTypeParamConstraint is unsupported at this Go version, and panics. +func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { + unsupported() +} + +// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or +// typeParams is non-empty. +func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { + if len(recvTypeParams) != 0 || len(typeParams) != 0 { + panic("signatures cannot have type parameters at this Go version") + } + return types.NewSignature(recv, params, results, variadic) +} + +// ForSignature returns an empty slice. +func ForSignature(*types.Signature) *TypeParamList { + return nil +} + +// RecvTypeParams returns a nil slice. +func RecvTypeParams(sig *types.Signature) *TypeParamList { + return nil +} + +// IsComparable returns false, as no interfaces are type-restricted at this Go +// version. +func IsComparable(*types.Interface) bool { + return false +} + +// IsMethodSet returns true, as no interfaces are type-restricted at this Go +// version. +func IsMethodSet(*types.Interface) bool { + return true +} + +// IsImplicit returns false, as no interfaces are implicit at this Go version. +func IsImplicit(*types.Interface) bool { + return false +} + +// MarkImplicit does nothing, because this Go version does not have implicit +// interfaces. +func MarkImplicit(*types.Interface) {} + +// ForNamed returns an empty type parameter list, as type parameters are not +// supported at this Go version. +func ForNamed(*types.Named) *TypeParamList { + return nil +} + +// SetForNamed panics if tparams is non-empty. +func SetForNamed(_ *types.Named, tparams []*TypeParam) { + if len(tparams) > 0 { + unsupported() + } +} + +// NamedTypeArgs returns nil. +func NamedTypeArgs(*types.Named) *TypeList { + return nil +} + +// NamedTypeOrigin is the identity method at this Go version. +func NamedTypeOrigin(named *types.Named) types.Type { + return named +} + +// Term holds information about a structural type restriction. +type Term struct { + tilde bool + typ types.Type +} + +func (m *Term) Tilde() bool { return m.tilde } +func (m *Term) Type() types.Type { return m.typ } +func (m *Term) String() string { + pre := "" + if m.tilde { + pre = "~" + } + return pre + m.typ.String() +} + +// NewTerm is unsupported at this Go version, and panics. +func NewTerm(tilde bool, typ types.Type) *Term { + return &Term{tilde, typ} +} + +// Union is a placeholder type, as type parameters are not supported at this Go +// version. Its methods panic on use. 
+type Union struct{ types.Type } + +func (*Union) Len() int { return 0 } +func (*Union) Term(i int) *Term { unsupported(); return nil } + +// NewUnion is unsupported at this Go version, and panics. +func NewUnion(terms []*Term) *Union { + unsupported() + return nil +} + +// InitInstanceInfo is a noop at this Go version. +func InitInstanceInfo(*types.Info) {} + +// Instance is a placeholder type, as type parameters are not supported at this +// Go version. +type Instance struct { + TypeArgs *TypeList + Type types.Type +} + +// GetInstances returns a nil map, as type parameters are not supported at this +// Go version. +func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil } + +// Context is a placeholder type, as type parameters are not supported at +// this Go version. +type Context struct{} + +// NewContext returns a placeholder Context instance. +func NewContext() *Context { + return &Context{} +} + +// Instantiate is unsupported on this Go version, and panics. +func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { + unsupported() + return nil, nil +} diff --git a/src/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/src/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go new file mode 100644 index 00000000..114a36b8 --- /dev/null +++ b/src/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go @@ -0,0 +1,151 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package typeparams + +import ( + "go/ast" + "go/types" +) + +// IndexListExpr is an alias for ast.IndexListExpr. +type IndexListExpr = ast.IndexListExpr + +// ForTypeSpec returns n.TypeParams. +func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList { + if n == nil { + return nil + } + return n.TypeParams +} + +// ForFuncType returns n.TypeParams. +func ForFuncType(n *ast.FuncType) *ast.FieldList { + if n == nil { + return nil + } + return n.TypeParams +} + +// TypeParam is an alias for types.TypeParam +type TypeParam = types.TypeParam + +// TypeParamList is an alias for types.TypeParamList +type TypeParamList = types.TypeParamList + +// TypeList is an alias for types.TypeList +type TypeList = types.TypeList + +// NewTypeParam calls types.NewTypeParam. +func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { + return types.NewTypeParam(name, constraint) +} + +// SetTypeParamConstraint calls tparam.SetConstraint(constraint). +func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { + tparam.SetConstraint(constraint) +} + +// NewSignatureType calls types.NewSignatureType. +func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { + return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic) +} + +// ForSignature returns sig.TypeParams() +func ForSignature(sig *types.Signature) *TypeParamList { + return sig.TypeParams() +} + +// RecvTypeParams returns sig.RecvTypeParams(). +func RecvTypeParams(sig *types.Signature) *TypeParamList { + return sig.RecvTypeParams() +} + +// IsComparable calls iface.IsComparable(). +func IsComparable(iface *types.Interface) bool { + return iface.IsComparable() +} + +// IsMethodSet calls iface.IsMethodSet(). 
+func IsMethodSet(iface *types.Interface) bool { + return iface.IsMethodSet() +} + +// IsImplicit calls iface.IsImplicit(). +func IsImplicit(iface *types.Interface) bool { + return iface.IsImplicit() +} + +// MarkImplicit calls iface.MarkImplicit(). +func MarkImplicit(iface *types.Interface) { + iface.MarkImplicit() +} + +// ForNamed extracts the (possibly empty) type parameter object list from +// named. +func ForNamed(named *types.Named) *TypeParamList { + return named.TypeParams() +} + +// SetForNamed sets the type params tparams on n. Each tparam must be of +// dynamic type *types.TypeParam. +func SetForNamed(n *types.Named, tparams []*TypeParam) { + n.SetTypeParams(tparams) +} + +// NamedTypeArgs returns named.TypeArgs(). +func NamedTypeArgs(named *types.Named) *TypeList { + return named.TypeArgs() +} + +// NamedTypeOrigin returns named.Orig(). +func NamedTypeOrigin(named *types.Named) types.Type { + return named.Origin() +} + +// Term is an alias for types.Term. +type Term = types.Term + +// NewTerm calls types.NewTerm. +func NewTerm(tilde bool, typ types.Type) *Term { + return types.NewTerm(tilde, typ) +} + +// Union is an alias for types.Union +type Union = types.Union + +// NewUnion calls types.NewUnion. +func NewUnion(terms []*Term) *Union { + return types.NewUnion(terms) +} + +// InitInstanceInfo initializes info to record information about type and +// function instances. +func InitInstanceInfo(info *types.Info) { + info.Instances = make(map[*ast.Ident]types.Instance) +} + +// Instance is an alias for types.Instance. +type Instance = types.Instance + +// GetInstances returns info.Instances. +func GetInstances(info *types.Info) map[*ast.Ident]Instance { + return info.Instances +} + +// Context is an alias for types.Context. +type Context = types.Context + +// NewContext calls types.NewContext. +func NewContext() *Context { + return types.NewContext() +} + +// Instantiate calls types.Instantiate. +func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { + return types.Instantiate(ctxt, typ, targs, validate) +} diff --git a/src/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/src/vendor/golang.org/x/tools/internal/typeparams/typeterm.go new file mode 100644 index 00000000..7ddee28d --- /dev/null +++ b/src/vendor/golang.org/x/tools/internal/typeparams/typeterm.go @@ -0,0 +1,170 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. + +package typeparams + +import "go/types" + +// A term describes elementary type sets: +// +// βˆ…: (*term)(nil) == βˆ… // set of no types (empty set) +// 𝓀: &term{} == 𝓀 // set of all types (𝓀niverse) +// T: &term{false, T} == {T} // set of type T +// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t +// +type term struct { + tilde bool // valid if typ != nil + typ types.Type +} + +func (x *term) String() string { + switch { + case x == nil: + return "βˆ…" + case x.typ == nil: + return "𝓀" + case x.tilde: + return "~" + x.typ.String() + default: + return x.typ.String() + } +} + +// equal reports whether x and y represent the same type set. 
+func (x *term) equal(y *term) bool { + // easy cases + switch { + case x == nil || y == nil: + return x == y + case x.typ == nil || y.typ == nil: + return x.typ == y.typ + } + // βˆ… βŠ‚ x, y βŠ‚ 𝓀 + + return x.tilde == y.tilde && types.Identical(x.typ, y.typ) +} + +// union returns the union x βˆͺ y: zero, one, or two non-nil terms. +func (x *term) union(y *term) (_, _ *term) { + // easy cases + switch { + case x == nil && y == nil: + return nil, nil // βˆ… βˆͺ βˆ… == βˆ… + case x == nil: + return y, nil // βˆ… βˆͺ y == y + case y == nil: + return x, nil // x βˆͺ βˆ… == x + case x.typ == nil: + return x, nil // 𝓀 βˆͺ y == 𝓀 + case y.typ == nil: + return y, nil // x βˆͺ 𝓀 == 𝓀 + } + // βˆ… βŠ‚ x, y βŠ‚ 𝓀 + + if x.disjoint(y) { + return x, y // x βˆͺ y == (x, y) if x ∩ y == βˆ… + } + // x.typ == y.typ + + // ~t βˆͺ ~t == ~t + // ~t βˆͺ T == ~t + // T βˆͺ ~t == ~t + // T βˆͺ T == T + if x.tilde || !y.tilde { + return x, nil + } + return y, nil +} + +// intersect returns the intersection x ∩ y. +func (x *term) intersect(y *term) *term { + // easy cases + switch { + case x == nil || y == nil: + return nil // βˆ… ∩ y == βˆ… and ∩ βˆ… == βˆ… + case x.typ == nil: + return y // 𝓀 ∩ y == y + case y.typ == nil: + return x // x ∩ 𝓀 == x + } + // βˆ… βŠ‚ x, y βŠ‚ 𝓀 + + if x.disjoint(y) { + return nil // x ∩ y == βˆ… if x ∩ y == βˆ… + } + // x.typ == y.typ + + // ~t ∩ ~t == ~t + // ~t ∩ T == T + // T ∩ ~t == T + // T ∩ T == T + if !x.tilde || y.tilde { + return x + } + return y +} + +// includes reports whether t ∈ x. +func (x *term) includes(t types.Type) bool { + // easy cases + switch { + case x == nil: + return false // t ∈ βˆ… == false + case x.typ == nil: + return true // t ∈ 𝓀 == true + } + // βˆ… βŠ‚ x βŠ‚ 𝓀 + + u := t + if x.tilde { + u = under(u) + } + return types.Identical(x.typ, u) +} + +// subsetOf reports whether x βŠ† y. +func (x *term) subsetOf(y *term) bool { + // easy cases + switch { + case x == nil: + return true // βˆ… βŠ† y == true + case y == nil: + return false // x βŠ† βˆ… == false since x != βˆ… + case y.typ == nil: + return true // x βŠ† 𝓀 == true + case x.typ == nil: + return false // 𝓀 βŠ† y == false since y != 𝓀 + } + // βˆ… βŠ‚ x, y βŠ‚ 𝓀 + + if x.disjoint(y) { + return false // x βŠ† y == false if x ∩ y == βˆ… + } + // x.typ == y.typ + + // ~t βŠ† ~t == true + // ~t βŠ† T == false + // T βŠ† ~t == true + // T βŠ† T == true + return !x.tilde || y.tilde +} + +// disjoint reports whether x ∩ y == βˆ…. +// x.typ and y.typ must not be nil. 
+func (x *term) disjoint(y *term) bool { + if debug && (x.typ == nil || y.typ == nil) { + panic("invalid argument(s)") + } + ux := x.typ + if y.tilde { + ux = under(ux) + } + uy := y.typ + if x.tilde { + uy = under(uy) + } + return !types.Identical(ux, uy) +} diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt index c60fece9..1efee6db 100644 --- a/src/vendor/modules.txt +++ b/src/vendor/modules.txt @@ -45,6 +45,9 @@ github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp +# github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 +## explicit; go 1.14 +github.com/google/pprof/profile # github.com/hashicorp/go-hclog v1.2.1 ## explicit; go 1.13 github.com/hashicorp/go-hclog @@ -91,7 +94,6 @@ github.com/nxadm/tail/watch github.com/nxadm/tail/winfile # github.com/onsi/ginkgo v1.16.5 ## explicit; go 1.16 -github.com/onsi/ginkgo github.com/onsi/ginkgo/config github.com/onsi/ginkgo/formatter github.com/onsi/ginkgo/ginkgo @@ -105,20 +107,38 @@ github.com/onsi/ginkgo/ginkgo/watch github.com/onsi/ginkgo/internal/codelocation github.com/onsi/ginkgo/internal/containernode github.com/onsi/ginkgo/internal/failer -github.com/onsi/ginkgo/internal/global github.com/onsi/ginkgo/internal/leafnodes github.com/onsi/ginkgo/internal/remote github.com/onsi/ginkgo/internal/spec github.com/onsi/ginkgo/internal/spec_iterator -github.com/onsi/ginkgo/internal/specrunner -github.com/onsi/ginkgo/internal/suite -github.com/onsi/ginkgo/internal/testingtproxy github.com/onsi/ginkgo/internal/writer github.com/onsi/ginkgo/reporters github.com/onsi/ginkgo/reporters/stenographer github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty github.com/onsi/ginkgo/types +# github.com/onsi/ginkgo/v2 v2.1.4 +## explicit; go 1.18 +github.com/onsi/ginkgo/v2 +github.com/onsi/ginkgo/v2/config +github.com/onsi/ginkgo/v2/formatter +github.com/onsi/ginkgo/v2/ginkgo +github.com/onsi/ginkgo/v2/ginkgo/build +github.com/onsi/ginkgo/v2/ginkgo/command +github.com/onsi/ginkgo/v2/ginkgo/generators +github.com/onsi/ginkgo/v2/ginkgo/internal +github.com/onsi/ginkgo/v2/ginkgo/labels +github.com/onsi/ginkgo/v2/ginkgo/outline +github.com/onsi/ginkgo/v2/ginkgo/run +github.com/onsi/ginkgo/v2/ginkgo/unfocus +github.com/onsi/ginkgo/v2/ginkgo/watch +github.com/onsi/ginkgo/v2/internal +github.com/onsi/ginkgo/v2/internal/global +github.com/onsi/ginkgo/v2/internal/interrupt_handler +github.com/onsi/ginkgo/v2/internal/parallel_support +github.com/onsi/ginkgo/v2/internal/testingtproxy +github.com/onsi/ginkgo/v2/reporters +github.com/onsi/ginkgo/v2/types # github.com/onsi/gomega v1.19.0 ## explicit; go 1.18 github.com/onsi/gomega @@ -196,9 +216,10 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e -## explicit; go 1.12 +# golang.org/x/tools v0.1.10 +## explicit; go 1.17 golang.org/x/tools/go/ast/inspector +golang.org/x/tools/internal/typeparams # google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 ## explicit; go 1.11 google.golang.org/genproto/googleapis/rpc/status
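To make the term-set algebra vendored above (termlist.go and typeterm.go) concrete, here is a minimal sketch of a hypothetical in-package test placed alongside those files; the test name and the chosen cases are assumptions for illustration, but each assertion follows directly from the union, intersect, subsetOf, and norm implementations shown in the diff.

package typeparams

import (
	"go/types"
	"testing"
)

// TestTermAlgebraSketch is a hypothetical example, not part of the vendored
// package: it checks a few identities of the elementary type-set terms
// defined in typeterm.go and termlist.go.
func TestTermAlgebraSketch(t *testing.T) {
	intT := &term{false, types.Typ[types.Int]}    // {int}
	tildeInt := &term{true, types.Typ[types.Int]} // ~int (all types whose underlying type is int)

	// {int} is a subset of ~int, but not the other way around.
	if !intT.subsetOf(tildeInt) || tildeInt.subsetOf(intT) {
		t.Fatal("subsetOf: expected {int} βŠ† ~int and ~int βŠ„ {int}")
	}

	// {int} βˆͺ ~int collapses to the single term ~int.
	if u1, u2 := intT.union(tildeInt); u2 != nil || !u1.equal(tildeInt) {
		t.Fatalf("union: expected ~int, got %s, %s", u1, u2)
	}

	// {int} ∩ ~int is {int}.
	if r := intT.intersect(tildeInt); !r.equal(intT) {
		t.Fatalf("intersect: expected {int}, got %s", r)
	}

	// Two disjoint non-tilde terms are already in normal form, so norm()
	// keeps both of them.
	xl := termlist{
		{false, types.Typ[types.Int]},
		{false, types.Typ[types.String]},
	}
	if got := xl.norm(); len(got) != 2 {
		t.Fatalf("norm: expected 2 terms, got %d", len(got))
	}
}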