From c621156a19d0e983847df43e13a769d107d6c2b2 Mon Sep 17 00:00:00 2001 From: cliveseldon Date: Wed, 30 Oct 2019 16:22:53 +0000 Subject: [PATCH] request logger vendor additons (#502) --- Gopkg.lock | 43 ++ vendor/github.com/cloudevents/sdk-go/LICENSE | 201 ++++++ vendor/github.com/cloudevents/sdk-go/alias.go | 141 ++++ .../sdk-go/pkg/cloudevents/client/client.go | 195 ++++++ .../pkg/cloudevents/client/defaulters.go | 37 + .../sdk-go/pkg/cloudevents/client/doc.go | 6 + .../pkg/cloudevents/client/observability.go | 68 ++ .../sdk-go/pkg/cloudevents/client/options.go | 53 ++ .../sdk-go/pkg/cloudevents/client/receiver.go | 193 ++++++ .../sdk-go/pkg/cloudevents/content_type.go | 35 + .../sdk-go/pkg/cloudevents/context/context.go | 76 ++ .../sdk-go/pkg/cloudevents/context/doc.go | 5 + .../sdk-go/pkg/cloudevents/context/logger.go | 43 ++ .../pkg/cloudevents/data_content_encoding.go | 11 + .../sdk-go/pkg/cloudevents/datacodec/codec.go | 93 +++ .../sdk-go/pkg/cloudevents/datacodec/doc.go | 5 + .../pkg/cloudevents/datacodec/json/data.go | 97 +++ .../pkg/cloudevents/datacodec/json/doc.go | 4 + .../datacodec/json/observability.go | 63 ++ .../cloudevents/datacodec/observability.go | 63 ++ .../pkg/cloudevents/datacodec/xml/data.go | 90 +++ .../pkg/cloudevents/datacodec/xml/doc.go | 4 + .../datacodec/xml/observability.go | 63 ++ .../cloudevents/sdk-go/pkg/cloudevents/doc.go | 4 + .../sdk-go/pkg/cloudevents/event.go | 97 +++ .../sdk-go/pkg/cloudevents/event_data.go | 99 +++ .../sdk-go/pkg/cloudevents/event_interface.go | 75 ++ .../sdk-go/pkg/cloudevents/event_marshal.go | 281 ++++++++ .../pkg/cloudevents/event_observability.go | 94 +++ .../sdk-go/pkg/cloudevents/event_reader.go | 98 +++ .../sdk-go/pkg/cloudevents/event_response.go | 37 + .../sdk-go/pkg/cloudevents/event_writer.go | 92 +++ .../sdk-go/pkg/cloudevents/eventcontext.go | 108 +++ .../pkg/cloudevents/eventcontext_v01.go | 268 ++++++++ .../cloudevents/eventcontext_v01_reader.go | 90 +++ 
.../cloudevents/eventcontext_v01_writer.go | 104 +++ .../pkg/cloudevents/eventcontext_v02.go | 286 ++++++++ .../cloudevents/eventcontext_v02_reader.go | 90 +++ .../cloudevents/eventcontext_v02_writer.go | 104 +++ .../pkg/cloudevents/eventcontext_v03.go | 296 ++++++++ .../cloudevents/eventcontext_v03_reader.go | 85 +++ .../cloudevents/eventcontext_v03_writer.go | 108 +++ .../sdk-go/pkg/cloudevents/extensions.go | 13 + .../pkg/cloudevents/observability/doc.go | 4 + .../pkg/cloudevents/observability/keys.go | 19 + .../pkg/cloudevents/observability/observer.go | 109 +++ .../sdk-go/pkg/cloudevents/transport/codec.go | 35 + .../sdk-go/pkg/cloudevents/transport/doc.go | 12 + .../sdk-go/pkg/cloudevents/transport/error.go | 30 + .../pkg/cloudevents/transport/http/codec.go | 176 +++++ .../transport/http/codec_structured.go | 44 ++ .../cloudevents/transport/http/codec_v01.go | 221 ++++++ .../cloudevents/transport/http/codec_v02.go | 252 +++++++ .../cloudevents/transport/http/codec_v03.go | 291 ++++++++ .../pkg/cloudevents/transport/http/context.go | 207 ++++++ .../pkg/cloudevents/transport/http/doc.go | 4 + .../cloudevents/transport/http/encoding.go | 178 +++++ .../pkg/cloudevents/transport/http/message.go | 148 ++++ .../transport/http/observability.go | 109 +++ .../pkg/cloudevents/transport/http/options.go | 266 +++++++ .../cloudevents/transport/http/transport.go | 649 ++++++++++++++++++ .../pkg/cloudevents/transport/message.go | 9 + .../pkg/cloudevents/transport/transport.go | 44 ++ .../sdk-go/pkg/cloudevents/types/allocate.go | 36 + .../sdk-go/pkg/cloudevents/types/doc.go | 4 + .../sdk-go/pkg/cloudevents/types/timestamp.go | 83 +++ .../sdk-go/pkg/cloudevents/types/urlref.go | 77 +++ vendor/go.opencensus.io/AUTHORS | 1 + vendor/go.opencensus.io/LICENSE | 202 ++++++ vendor/go.opencensus.io/internal/internal.go | 37 + vendor/go.opencensus.io/internal/sanitize.go | 50 ++ .../internal/tagencoding/tagencoding.go | 75 ++ .../internal/traceinternals.go | 53 ++ 
.../go.opencensus.io/metric/metricdata/doc.go | 19 + .../metric/metricdata/exemplar.go | 38 + .../metric/metricdata/label.go | 35 + .../metric/metricdata/metric.go | 46 ++ .../metric/metricdata/point.go | 193 ++++++ .../metric/metricdata/type_string.go | 16 + .../metric/metricdata/unit.go | 27 + .../metric/metricproducer/manager.go | 78 +++ .../metric/metricproducer/producer.go | 28 + vendor/go.opencensus.io/opencensus.go | 21 + vendor/go.opencensus.io/resource/resource.go | 164 +++++ vendor/go.opencensus.io/stats/doc.go | 69 ++ .../go.opencensus.io/stats/internal/record.go | 25 + vendor/go.opencensus.io/stats/measure.go | 109 +++ .../go.opencensus.io/stats/measure_float64.go | 55 ++ .../go.opencensus.io/stats/measure_int64.go | 55 ++ vendor/go.opencensus.io/stats/record.go | 117 ++++ vendor/go.opencensus.io/stats/units.go | 25 + .../stats/view/aggregation.go | 120 ++++ .../stats/view/aggregation_data.go | 293 ++++++++ .../go.opencensus.io/stats/view/collector.go | 86 +++ vendor/go.opencensus.io/stats/view/doc.go | 47 ++ vendor/go.opencensus.io/stats/view/export.go | 58 ++ vendor/go.opencensus.io/stats/view/view.go | 221 ++++++ .../stats/view/view_to_metric.go | 149 ++++ vendor/go.opencensus.io/stats/view/worker.go | 281 ++++++++ .../stats/view/worker_commands.go | 186 +++++ vendor/go.opencensus.io/tag/context.go | 43 ++ vendor/go.opencensus.io/tag/doc.go | 26 + vendor/go.opencensus.io/tag/key.go | 44 ++ vendor/go.opencensus.io/tag/map.go | 229 ++++++ vendor/go.opencensus.io/tag/map_codec.go | 239 +++++++ vendor/go.opencensus.io/tag/metadata.go | 52 ++ vendor/go.opencensus.io/tag/profile_19.go | 31 + vendor/go.opencensus.io/tag/profile_not19.go | 23 + vendor/go.opencensus.io/tag/validate.go | 56 ++ vendor/go.opencensus.io/trace/basetypes.go | 119 ++++ vendor/go.opencensus.io/trace/config.go | 86 +++ vendor/go.opencensus.io/trace/doc.go | 53 ++ vendor/go.opencensus.io/trace/evictedqueue.go | 38 + vendor/go.opencensus.io/trace/export.go | 97 +++ 
.../trace/internal/internal.go | 22 + vendor/go.opencensus.io/trace/lrumap.go | 61 ++ vendor/go.opencensus.io/trace/sampling.go | 75 ++ vendor/go.opencensus.io/trace/spanbucket.go | 130 ++++ vendor/go.opencensus.io/trace/spanstore.go | 306 +++++++++ vendor/go.opencensus.io/trace/status_codes.go | 37 + vendor/go.opencensus.io/trace/trace.go | 598 ++++++++++++++++ vendor/go.opencensus.io/trace/trace_go11.go | 32 + .../go.opencensus.io/trace/trace_nongo11.go | 25 + .../trace/tracestate/tracestate.go | 147 ++++ 124 files changed, 12573 insertions(+) create mode 100644 vendor/github.com/cloudevents/sdk-go/LICENSE create mode 100644 vendor/github.com/cloudevents/sdk-go/alias.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/client.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/defaulters.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/observability.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/options.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/receiver.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/content_type.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/context.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/logger.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/data_content_encoding.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/codec.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/data.go create mode 100644 
vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/observability.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/observability.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/data.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/observability.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_data.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_interface.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_marshal.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_observability.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_reader.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_response.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_reader.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_reader.go create mode 100644 
vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_reader.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/extensions.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/keys.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/observer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/codec.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/error.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_structured.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v01.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v02.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v03.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/context.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/encoding.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/message.go create mode 100644 
vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/observability.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/options.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/transport.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/transport.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/allocate.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/timestamp.go create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/urlref.go create mode 100644 vendor/go.opencensus.io/AUTHORS create mode 100644 vendor/go.opencensus.io/LICENSE create mode 100644 vendor/go.opencensus.io/internal/internal.go create mode 100644 vendor/go.opencensus.io/internal/sanitize.go create mode 100644 vendor/go.opencensus.io/internal/tagencoding/tagencoding.go create mode 100644 vendor/go.opencensus.io/internal/traceinternals.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/doc.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/exemplar.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/label.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/metric.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/point.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/type_string.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/unit.go create mode 100644 vendor/go.opencensus.io/metric/metricproducer/manager.go create mode 100644 vendor/go.opencensus.io/metric/metricproducer/producer.go create mode 100644 vendor/go.opencensus.io/opencensus.go create mode 100644 vendor/go.opencensus.io/resource/resource.go create mode 100644 
vendor/go.opencensus.io/stats/doc.go create mode 100644 vendor/go.opencensus.io/stats/internal/record.go create mode 100644 vendor/go.opencensus.io/stats/measure.go create mode 100644 vendor/go.opencensus.io/stats/measure_float64.go create mode 100644 vendor/go.opencensus.io/stats/measure_int64.go create mode 100644 vendor/go.opencensus.io/stats/record.go create mode 100644 vendor/go.opencensus.io/stats/units.go create mode 100644 vendor/go.opencensus.io/stats/view/aggregation.go create mode 100644 vendor/go.opencensus.io/stats/view/aggregation_data.go create mode 100644 vendor/go.opencensus.io/stats/view/collector.go create mode 100644 vendor/go.opencensus.io/stats/view/doc.go create mode 100644 vendor/go.opencensus.io/stats/view/export.go create mode 100644 vendor/go.opencensus.io/stats/view/view.go create mode 100644 vendor/go.opencensus.io/stats/view/view_to_metric.go create mode 100644 vendor/go.opencensus.io/stats/view/worker.go create mode 100644 vendor/go.opencensus.io/stats/view/worker_commands.go create mode 100644 vendor/go.opencensus.io/tag/context.go create mode 100644 vendor/go.opencensus.io/tag/doc.go create mode 100644 vendor/go.opencensus.io/tag/key.go create mode 100644 vendor/go.opencensus.io/tag/map.go create mode 100644 vendor/go.opencensus.io/tag/map_codec.go create mode 100644 vendor/go.opencensus.io/tag/metadata.go create mode 100644 vendor/go.opencensus.io/tag/profile_19.go create mode 100644 vendor/go.opencensus.io/tag/profile_not19.go create mode 100644 vendor/go.opencensus.io/tag/validate.go create mode 100644 vendor/go.opencensus.io/trace/basetypes.go create mode 100644 vendor/go.opencensus.io/trace/config.go create mode 100644 vendor/go.opencensus.io/trace/doc.go create mode 100644 vendor/go.opencensus.io/trace/evictedqueue.go create mode 100644 vendor/go.opencensus.io/trace/export.go create mode 100644 vendor/go.opencensus.io/trace/internal/internal.go create mode 100644 vendor/go.opencensus.io/trace/lrumap.go create mode 100644 
vendor/go.opencensus.io/trace/sampling.go create mode 100644 vendor/go.opencensus.io/trace/spanbucket.go create mode 100644 vendor/go.opencensus.io/trace/spanstore.go create mode 100644 vendor/go.opencensus.io/trace/status_codes.go create mode 100644 vendor/go.opencensus.io/trace/trace.go create mode 100644 vendor/go.opencensus.io/trace/trace_go11.go create mode 100644 vendor/go.opencensus.io/trace/trace_nongo11.go create mode 100644 vendor/go.opencensus.io/trace/tracestate/tracestate.go diff --git a/Gopkg.lock b/Gopkg.lock index 2a6de3f25b92..b686983035db 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -33,6 +33,26 @@ revision = "37c8de3658fcb183f997c4e13e8337516ab753e6" version = "v1.0.1" +[[projects]] + digest = "1:d3c3de7c1ad57f795e5c409afa842fea27899f124622bcdce83f8bea8721b9db" + name = "github.com/cloudevents/sdk-go" + packages = [ + ".", + "pkg/cloudevents", + "pkg/cloudevents/client", + "pkg/cloudevents/context", + "pkg/cloudevents/datacodec", + "pkg/cloudevents/datacodec/json", + "pkg/cloudevents/datacodec/xml", + "pkg/cloudevents/observability", + "pkg/cloudevents/transport", + "pkg/cloudevents/transport/http", + "pkg/cloudevents/types", + ] + pruneopts = "NUT" + revision = "4cc108a637ff4bf2d1848c60d5fbb0f711fd1b8c" + version = "v0.9.2" + [[projects]] digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" name = "github.com/davecgh/go-spew" @@ -547,6 +567,28 @@ revision = "6612da89516247503f03ef76e974b51a434fb52e" version = "v1.13.1" +[[projects]] + digest = "1:9d294b43f67e8d664adb0b5b60f378d78c1f978231918db0d1723b6a5b4f54a8" + name = "go.opencensus.io" + packages = [ + ".", + "internal", + "internal/tagencoding", + "metric/metricdata", + "metric/metricproducer", + "resource", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/tracestate", + ] + pruneopts = "NUT" + revision = "59d1ce35d30f3c25ba762169da2a37eab6ffa041" + version = "v0.22.1" + [[projects]] digest = 
"1:cc9d86ec4e6e3bdf87e3a421273bfeed003cf8e21351c0302fe8b0eb7b10efe6" name = "go.uber.org/atomic" @@ -1236,6 +1278,7 @@ analyzer-name = "dep" analyzer-version = 1 input-imports = [ + "github.com/cloudevents/sdk-go", "github.com/emicklei/go-restful", "github.com/getkin/kin-openapi/openapi3", "github.com/getkin/kin-openapi/openapi3filter", diff --git a/vendor/github.com/cloudevents/sdk-go/LICENSE b/vendor/github.com/cloudevents/sdk-go/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/cloudevents/sdk-go/alias.go b/vendor/github.com/cloudevents/sdk-go/alias.go new file mode 100644 index 000000000000..f97b6473adff --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/alias.go @@ -0,0 +1,141 @@ +package cloudevents + +// Package cloudevents alias' common functions and types to improve discoverability and reduce +// the number of imports for simple HTTP clients. 
+ +import ( + "github.com/cloudevents/sdk-go/pkg/cloudevents" + "github.com/cloudevents/sdk-go/pkg/cloudevents/client" + "github.com/cloudevents/sdk-go/pkg/cloudevents/context" + "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http" + "github.com/cloudevents/sdk-go/pkg/cloudevents/types" +) + +// Client + +type ClientOption client.Option +type Client = client.Client +type ConvertFn = client.ConvertFn + +// Event + +type Event = cloudevents.Event +type EventResponse = cloudevents.EventResponse + +// Context + +type EventContext = cloudevents.EventContext +type EventContextV01 = cloudevents.EventContextV01 +type EventContextV02 = cloudevents.EventContextV02 +type EventContextV03 = cloudevents.EventContextV03 + +// Custom Types + +type Timestamp = types.Timestamp +type URLRef = types.URLRef + +// HTTP Transport + +type HTTPOption http.Option +type HTTPTransport = http.Transport +type HTTPTransportContext = http.TransportContext +type HTTPTransportResponseContext = http.TransportResponseContext +type HTTPEncoding = http.Encoding + +const ( + // Encoding + + ApplicationXML = cloudevents.ApplicationXML + ApplicationJSON = cloudevents.ApplicationJSON + ApplicationCloudEventsJSON = cloudevents.ApplicationCloudEventsJSON + ApplicationCloudEventsBatchJSON = cloudevents.ApplicationCloudEventsBatchJSON + Base64 = cloudevents.Base64 + + // Event Versions + + VersionV01 = cloudevents.CloudEventsVersionV01 + VersionV02 = cloudevents.CloudEventsVersionV02 + VersionV03 = cloudevents.CloudEventsVersionV03 + + // HTTP Transport Encodings + + HTTPBinaryV01 = http.BinaryV01 + HTTPStructuredV01 = http.StructuredV01 + HTTPBinaryV02 = http.BinaryV02 + HTTPStructuredV02 = http.StructuredV02 + HTTPBinaryV03 = http.BinaryV03 + HTTPStructuredV03 = http.StructuredV03 + HTTPBatchedV03 = http.BatchedV03 + + // Context HTTP Transport Encodings + + Binary = http.Binary + Structured = http.Structured +) + +var ( + // 
ContentType Helpers + + StringOfApplicationJSON = cloudevents.StringOfApplicationJSON + StringOfApplicationXML = cloudevents.StringOfApplicationXML + StringOfApplicationCloudEventsJSON = cloudevents.StringOfApplicationCloudEventsJSON + StringOfApplicationCloudEventsBatchJSON = cloudevents.StringOfApplicationCloudEventsBatchJSON + StringOfBase64 = cloudevents.StringOfBase64 + + // Client Creation + + NewClient = client.New + NewDefaultClient = client.NewDefault + + // Client Options + + WithEventDefaulter = client.WithEventDefaulter + WithUUIDs = client.WithUUIDs + WithTimeNow = client.WithTimeNow + WithConverterFn = client.WithConverterFn + + // Event Creation + + NewEvent = cloudevents.New + + // Tracing + + EnableTracing = observability.EnableTracing + + // Context + + ContextWithTarget = context.WithTarget + TargetFromContext = context.TargetFrom + ContextWithEncoding = context.WithEncoding + EncodingFromContext = context.EncodingFrom + + // Custom Types + + ParseTimestamp = types.ParseTimestamp + ParseURLRef = types.ParseURLRef + + // HTTP Transport + + NewHTTPTransport = http.New + + // HTTP Transport Options + + WithTarget = http.WithTarget + WithMethod = http.WithMethod + WitHHeader = http.WithHeader + WithShutdownTimeout = http.WithShutdownTimeout + WithEncoding = http.WithEncoding + WithContextBasedEncoding = http.WithContextBasedEncoding + WithBinaryEncoding = http.WithBinaryEncoding + WithStructuredEncoding = http.WithStructuredEncoding + WithPort = http.WithPort + WithPath = http.WithPath + WithMiddleware = http.WithMiddleware + WithLongPollTarget = http.WithLongPollTarget + + // HTTP Context + + HTTPTransportContextFrom = http.TransportContextFrom + ContextWithHeader = http.ContextWithHeader +) diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/client.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/client.go new file mode 100644 index 000000000000..a36fe4718e9a --- /dev/null +++ 
b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/client.go @@ -0,0 +1,195 @@ +package client + +import ( + "context" + "fmt" + "sync" + + "github.com/cloudevents/sdk-go/pkg/cloudevents" + "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" + "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http" +) + +// Client interface defines the runtime contract the CloudEvents client supports. +type Client interface { + // Send will transmit the given event over the client's configured transport. + Send(ctx context.Context, event cloudevents.Event) (context.Context, *cloudevents.Event, error) + + // StartReceiver will register the provided function for callback on receipt + // of a cloudevent. It will also start the underlying transport as it has + // been configured. + // This call is blocking. + // Valid fn signatures are: + // * func() + // * func() error + // * func(context.Context) + // * func(context.Context) error + // * func(cloudevents.Event) + // * func(cloudevents.Event) error + // * func(context.Context, cloudevents.Event) + // * func(context.Context, cloudevents.Event) error + // * func(cloudevents.Event, *cloudevents.EventResponse) + // * func(cloudevents.Event, *cloudevents.EventResponse) error + // * func(context.Context, cloudevents.Event, *cloudevents.EventResponse) + // * func(context.Context, cloudevents.Event, *cloudevents.EventResponse) error + // Note: if fn returns an error, it is treated as a critical and + // EventResponse will not be processed. + StartReceiver(ctx context.Context, fn interface{}) error +} + +// New produces a new client with the provided transport object and applied +// client options. 
+func New(t transport.Transport, opts ...Option) (Client, error) { + c := &ceClient{ + transport: t, + } + if err := c.applyOptions(opts...); err != nil { + return nil, err + } + t.SetReceiver(c) + return c, nil +} + +// NewDefault provides the good defaults for the common case using an HTTP +// Transport client. The http transport has had WithBinaryEncoding http +// transport option applied to it. The client will always send Binary +// encoding but will inspect the outbound event context and match the version. +// The WithtimeNow and WithUUIDs client options are also applied to the client, +// all outbound events will have a time and id set if not already present. +func NewDefault() (Client, error) { + t, err := http.New(http.WithBinaryEncoding()) + if err != nil { + return nil, err + } + c, err := New(t, WithTimeNow(), WithUUIDs()) + if err != nil { + return nil, err + } + return c, nil +} + +type ceClient struct { + transport transport.Transport + fn *receiverFn + + convertFn ConvertFn + + receiverMu sync.Mutex + eventDefaulterFns []EventDefaulter +} + +// Send transmits the provided event on a preconfigured Transport. +// Send returns a response event if there is a response or an error if there +// was an an issue validating the outbound event or the transport returns an +// error. +func (c *ceClient) Send(ctx context.Context, event cloudevents.Event) (context.Context, *cloudevents.Event, error) { + ctx, r := observability.NewReporter(ctx, reportSend) + rctx, resp, err := c.obsSend(ctx, event) + if err != nil { + r.Error() + } else { + r.OK() + } + return rctx, resp, err +} + +func (c *ceClient) obsSend(ctx context.Context, event cloudevents.Event) (context.Context, *cloudevents.Event, error) { + // Confirm we have a transport set. + if c.transport == nil { + return ctx, nil, fmt.Errorf("client not ready, transport not initialized") + } + // Apply the defaulter chain to the incoming event. 
+ if len(c.eventDefaulterFns) > 0 { + for _, fn := range c.eventDefaulterFns { + event = fn(ctx, event) + } + } + + // Validate the event conforms to the CloudEvents Spec. + if err := event.Validate(); err != nil { + return ctx, nil, err + } + // Send the event over the transport. + return c.transport.Send(ctx, event) +} + +// Receive is called from from the transport on event delivery. +func (c *ceClient) Receive(ctx context.Context, event cloudevents.Event, resp *cloudevents.EventResponse) error { + ctx, r := observability.NewReporter(ctx, reportReceive) + err := c.obsReceive(ctx, event, resp) + if err != nil { + r.Error() + } else { + r.OK() + } + return err +} + +func (c *ceClient) obsReceive(ctx context.Context, event cloudevents.Event, resp *cloudevents.EventResponse) error { + if c.fn != nil { + ctx, rFn := observability.NewReporter(ctx, reportReceiveFn) + err := c.fn.invoke(ctx, event, resp) + if err != nil { + rFn.Error() + } else { + rFn.OK() + } + + // Apply the defaulter chain to the outgoing event. + if err == nil && resp != nil && resp.Event != nil && len(c.eventDefaulterFns) > 0 { + for _, fn := range c.eventDefaulterFns { + *resp.Event = fn(ctx, *resp.Event) + } + // Validate the event conforms to the CloudEvents Spec. + if err := resp.Event.Validate(); err != nil { + return fmt.Errorf("cloudevent validation failed on response event: %v", err) + } + } + return err + } + return nil +} + +// StartReceiver sets up the given fn to handle Receive. +// See Client.StartReceiver for details. This is a blocking call. 
+func (c *ceClient) StartReceiver(ctx context.Context, fn interface{}) error { + c.receiverMu.Lock() + defer c.receiverMu.Unlock() + + if c.transport == nil { + return fmt.Errorf("client not ready, transport not initialized") + } + if c.fn != nil { + return fmt.Errorf("client already has a receiver") + } + + if fn, err := receiver(fn); err != nil { + return err + } else { + c.fn = fn + } + + defer func() { + c.fn = nil + }() + + return c.transport.StartReceiver(ctx) +} + +func (c *ceClient) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(c); err != nil { + return err + } + } + return nil +} + +// Convert implements transport Converter.Convert. +func (c *ceClient) Convert(ctx context.Context, m transport.Message, err error) (*cloudevents.Event, error) { + if c.convertFn != nil { + return c.convertFn(ctx, m, err) + } + return nil, err +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/defaulters.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/defaulters.go new file mode 100644 index 000000000000..40bd85a9cb5a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/defaulters.go @@ -0,0 +1,37 @@ +package client + +import ( + "context" + "time" + + "github.com/cloudevents/sdk-go/pkg/cloudevents" + "github.com/google/uuid" +) + +// EventDefaulter is the function signature for extensions that are able +// to perform event defaulting. +type EventDefaulter func(ctx context.Context, event cloudevents.Event) cloudevents.Event + +// DefaultIDToUUIDIfNotSet will inspect the provided event and assign a UUID to +// context.ID if it is found to be empty. 
+func DefaultIDToUUIDIfNotSet(ctx context.Context, event cloudevents.Event) cloudevents.Event { + if event.Context != nil { + if event.ID() == "" { + event.Context = event.Context.Clone() + event.SetID(uuid.New().String()) + } + } + return event +} + +// DefaultTimeToNowIfNotSet will inspect the provided event and assign a new +// Timestamp to context.Time if it is found to be nil or zero. +func DefaultTimeToNowIfNotSet(ctx context.Context, event cloudevents.Event) cloudevents.Event { + if event.Context != nil { + if event.Time().IsZero() { + event.Context = event.Context.Clone() + event.SetTime(time.Now()) + } + } + return event +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/doc.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/doc.go new file mode 100644 index 000000000000..a6a602bb4107 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/doc.go @@ -0,0 +1,6 @@ +/* +Package client holds the recommended entry points for interacting with the CloudEvents Golang SDK. The client wraps +a selected transport. The client adds validation and defaulting for sending events, and flexible receiver method +registration. For full details, read the `client.Client` documentation. +*/ +package client diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/observability.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/observability.go new file mode 100644 index 000000000000..b844c19a86ef --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/observability.go @@ -0,0 +1,68 @@ +package client + +import ( + "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" +) + +var ( + // LatencyMs measures the latency in milliseconds for the CloudEvents + // client methods. 
+ LatencyMs = stats.Float64("cloudevents.io/sdk-go/client/latency", "The latency in milliseconds for the CloudEvents client methods.", "ms") +) + +var ( + // LatencyView is an OpenCensus view that shows client method latency. + LatencyView = &view.View{ + Name: "client/latency", + Measure: LatencyMs, + Description: "The distribution of latency inside of client for CloudEvents.", + Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), + TagKeys: observability.LatencyTags(), + } +) + +type observed int32 + +// Adheres to Observable +var _ observability.Observable = observed(0) + +const ( + reportSend observed = iota + reportReceive + reportReceiveFn +) + +// TraceName implements Observable.TraceName +func (o observed) TraceName() string { + switch o { + case reportSend: + return "client/send" + case reportReceive: + return "client/receive" + case reportReceiveFn: + return "client/receive/fn" + default: + return "client/unknown" + } +} + +// MethodName implements Observable.MethodName +func (o observed) MethodName() string { + switch o { + case reportSend: + return "send" + case reportReceive: + return "receive" + case reportReceiveFn: + return "receive/fn" + default: + return "unknown" + } +} + +// LatencyMs implements Observable.LatencyMs +func (o observed) LatencyMs() *stats.Float64Measure { + return LatencyMs +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/options.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/options.go new file mode 100644 index 000000000000..6e5051c3eafc --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/options.go @@ -0,0 +1,53 @@ +package client + +import ( + "fmt" +) + +// Option is the function signature required to be considered an client.Option. +type Option func(*ceClient) error + +// WithEventDefaulter adds an event defaulter to the end of the defaulter chain. 
+func WithEventDefaulter(fn EventDefaulter) Option { + return func(c *ceClient) error { + if fn == nil { + return fmt.Errorf("client option was given an nil event defaulter") + } + c.eventDefaulterFns = append(c.eventDefaulterFns, fn) + return nil + } +} + +// WithUUIDs adds DefaultIDToUUIDIfNotSet event defaulter to the end of the +// defaulter chain. +func WithUUIDs() Option { + return func(c *ceClient) error { + c.eventDefaulterFns = append(c.eventDefaulterFns, DefaultIDToUUIDIfNotSet) + return nil + } +} + +// WithTimeNow adds DefaultTimeToNowIfNotSet event defaulter to the end of the +// defaulter chain. +func WithTimeNow() Option { + return func(c *ceClient) error { + c.eventDefaulterFns = append(c.eventDefaulterFns, DefaultTimeToNowIfNotSet) + return nil + } +} + +// WithConverterFn defines the function the transport will use to delegate +// conversion of non-decodable messages. +func WithConverterFn(fn ConvertFn) Option { + return func(c *ceClient) error { + if fn == nil { + return fmt.Errorf("client option was given an nil message converter") + } + if c.transport.HasConverter() { + return fmt.Errorf("transport converter already set") + } + c.convertFn = fn + c.transport.SetConverter(c) + return nil + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/receiver.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/receiver.go new file mode 100644 index 000000000000..9734341d43f6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/receiver.go @@ -0,0 +1,193 @@ +package client + +import ( + "context" + "errors" + "fmt" + "reflect" + + "github.com/cloudevents/sdk-go/pkg/cloudevents" + "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" +) + +// Receive is the signature of a fn to be invoked for incoming cloudevents. +// If fn returns an error, EventResponse will not be considered by the client or +// or transport. 
+// This is just an FYI: +type ReceiveFull func(context.Context, cloudevents.Event, *cloudevents.EventResponse) error + +type receiverFn struct { + numIn int + fnValue reflect.Value + + hasContextIn bool + hasEventIn bool + hasEventResponseIn bool + + hasErrorOut bool +} + +// ConvertFn defines the signature the client expects to enable conversion +// delegation. +type ConvertFn func(context.Context, transport.Message, error) (*cloudevents.Event, error) + +const ( + inParamUsage = "expected a function taking either no parameters, one or more of (context.Context, cloudevents.Event, *cloudevents.EventResponse) ordered" + outParamUsage = "expected a function returning either nothing or an error" +) + +var ( + contextType = reflect.TypeOf((*context.Context)(nil)).Elem() + eventType = reflect.TypeOf((*cloudevents.Event)(nil)).Elem() + eventResponseType = reflect.TypeOf((*cloudevents.EventResponse)(nil)) // want the ptr type + errorType = reflect.TypeOf((*error)(nil)).Elem() +) + +// receiver creates a receiverFn wrapper class that is used by the client to +// validate and invoke the provided function. 
+// Valid fn signatures are: +// * func() +// * func() error +// * func(context.Context) +// * func(context.Context) error +// * func(cloudevents.Event) +// * func(cloudevents.Event) error +// * func(context.Context, cloudevents.Event) +// * func(context.Context, cloudevents.Event) error +// * func(cloudevents.Event, *cloudevents.EventResponse) +// * func(cloudevents.Event, *cloudevents.EventResponse) error +// * func(context.Context, cloudevents.Event, *cloudevents.EventResponse) +// * func(context.Context, cloudevents.Event, *cloudevents.EventResponse) error +// +func receiver(fn interface{}) (*receiverFn, error) { + fnType := reflect.TypeOf(fn) + if fnType.Kind() != reflect.Func { + return nil, errors.New("must pass a function to handle events") + } + + r := &receiverFn{ + fnValue: reflect.ValueOf(fn), + numIn: fnType.NumIn(), + } + if err := r.validate(fnType); err != nil { + return nil, err + } + + return r, nil +} + +func (r *receiverFn) invoke(ctx context.Context, event cloudevents.Event, resp *cloudevents.EventResponse) error { + args := make([]reflect.Value, 0, r.numIn) + + if r.numIn > 0 { + if r.hasContextIn { + args = append(args, reflect.ValueOf(ctx)) + } + if r.hasEventIn { + args = append(args, reflect.ValueOf(event)) + } + if r.hasEventResponseIn { + args = append(args, reflect.ValueOf(resp)) + } + } + v := r.fnValue.Call(args) + if r.hasErrorOut && len(v) >= 1 { + if err, ok := v[0].Interface().(error); ok { + return err + } + } + return nil +} + +// Verifies that the inputs to a function have a valid signature +// Valid input is to be [0, all] of +// context.Context, cloudevents.Event, *cloudevents.EventResponse in this order. 
+func (r *receiverFn) validateInParamSignature(fnType reflect.Type) error { + r.hasContextIn = false + r.hasEventIn = false + r.hasEventResponseIn = false + + switch fnType.NumIn() { + case 3: + // has to be cloudevents.Event, *cloudevents.EventResponse + if !fnType.In(2).ConvertibleTo(eventResponseType) { + return fmt.Errorf("%s; cannot convert parameter 2 from %s to *cloudevents.EventResponse", inParamUsage, fnType.In(2)) + } else { + r.hasEventResponseIn = true + } + fallthrough + case 2: + // can be cloudevents.Event or *cloudevents.EventResponse + if !fnType.In(1).ConvertibleTo(eventResponseType) { + if !fnType.In(1).ConvertibleTo(eventType) { + return fmt.Errorf("%s; cannot convert parameter 1 from %s to cloudevents.Event or *cloudevents.EventResponse", inParamUsage, fnType.In(1)) + } else { + r.hasEventIn = true + } + } else if r.hasEventResponseIn { + return fmt.Errorf("%s; duplicate parameter of type *cloudevents.EventResponse", inParamUsage) + } else { + r.hasEventResponseIn = true + } + fallthrough + case 1: + if !fnType.In(0).ConvertibleTo(contextType) { + if !fnType.In(0).ConvertibleTo(eventResponseType) { + if !fnType.In(0).ConvertibleTo(eventType) { + return fmt.Errorf("%s; cannot convert parameter 0 from %s to context.Context, cloudevents.Event or *cloudevents.EventResponse", inParamUsage, fnType.In(0)) + } else if r.hasEventIn { + return fmt.Errorf("%s; duplicate parameter of type cloudevents.Event", inParamUsage) + } else { + r.hasEventIn = true + } + } else if r.hasEventResponseIn { + return fmt.Errorf("%s; duplicate parameter of type *cloudevents.EventResponse", inParamUsage) + } else if r.hasEventIn { + return fmt.Errorf("%s; out of order parameter 0 for %s", inParamUsage, fnType.In(1)) + } else { + r.hasEventResponseIn = true + } + } else { + r.hasContextIn = true + } + fallthrough + case 0: + return nil + default: + return fmt.Errorf("%s; function has too many parameters (%d)", inParamUsage, fnType.NumIn()) + } +} + +// Verifies that the 
outputs of a function have a valid signature +// Valid output signatures: +// (), (error) +func (r *receiverFn) validateOutParamSignature(fnType reflect.Type) error { + r.hasErrorOut = false + switch fnType.NumOut() { + case 1: + paramNo := fnType.NumOut() - 1 + paramType := fnType.Out(paramNo) + if !paramType.ConvertibleTo(errorType) { + return fmt.Errorf("%s; cannot convert return type %d from %s to error", outParamUsage, paramNo, paramType) + } else { + r.hasErrorOut = true + } + fallthrough + case 0: + return nil + default: + return fmt.Errorf("%s; function has too many return types (%d)", outParamUsage, fnType.NumOut()) + } +} + +// validateReceiverFn validates that a function has the right number of in and +// out params and that they are of allowed types. +func (r *receiverFn) validate(fnType reflect.Type) error { + if err := r.validateInParamSignature(fnType); err != nil { + return err + } + if err := r.validateOutParamSignature(fnType); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/content_type.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/content_type.go new file mode 100644 index 000000000000..e4e0e17f2b79 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/content_type.go @@ -0,0 +1,35 @@ +package cloudevents + +const ( + TextJSON = "text/json" + ApplicationJSON = "application/json" + ApplicationXML = "application/xml" + ApplicationCloudEventsJSON = "application/cloudevents+json" + ApplicationCloudEventsBatchJSON = "application/cloudevents-batch+json" +) + +// StringOfApplicationJSON returns a string pointer to "application/json" +func StringOfApplicationJSON() *string { + a := ApplicationJSON + return &a +} + +// StringOfApplicationXML returns a string pointer to "application/xml" +func StringOfApplicationXML() *string { + a := ApplicationXML + return &a +} + +// StringOfApplicationCloudEventsJSON returns a string pointer to +// 
"application/cloudevents+json" +func StringOfApplicationCloudEventsJSON() *string { + a := ApplicationCloudEventsJSON + return &a +} + +// StringOfApplicationCloudEventsBatchJSON returns a string pointer to +// "application/cloudevents-batch+json" +func StringOfApplicationCloudEventsBatchJSON() *string { + a := ApplicationCloudEventsBatchJSON + return &a +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/context.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/context.go new file mode 100644 index 000000000000..e580360f130b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/context.go @@ -0,0 +1,76 @@ +package context + +import ( + "context" + "net/url" + "strings" +) + +// Opaque key type used to store target +type targetKeyType struct{} + +var targetKey = targetKeyType{} + +// WithTarget returns back a new context with the given target. Target is intended to be transport dependent. +// For http transport, `target` should be a full URL and will be injected into the outbound http request. +func WithTarget(ctx context.Context, target string) context.Context { + return context.WithValue(ctx, targetKey, target) +} + +// TargetFrom looks in the given context and returns `target` as a parsed url if found and valid, otherwise nil. +func TargetFrom(ctx context.Context) *url.URL { + c := ctx.Value(targetKey) + if c != nil { + if s, ok := c.(string); ok && s != "" { + if target, err := url.Parse(s); err == nil { + return target + } + } + } + return nil +} + +// Opaque key type used to store topic +type topicKeyType struct{} + +var topicKey = topicKeyType{} + +// WithTopic returns back a new context with the given topic. Topic is intended to be transport dependent. +// For pubsub transport, `topic` should be a Pub/Sub Topic ID. 
+func WithTopic(ctx context.Context, topic string) context.Context { + return context.WithValue(ctx, topicKey, topic) +} + +// TopicFrom looks in the given context and returns `topic` as a string if found and valid, otherwise "". +func TopicFrom(ctx context.Context) string { + c := ctx.Value(topicKey) + if c != nil { + if s, ok := c.(string); ok { + return s + } + } + return "" +} + +// Opaque key type used to store encoding +type encodingKeyType struct{} + +var encodingKey = encodingKeyType{} + +// WithEncoding returns back a new context with the given encoding. Encoding is intended to be transport dependent. +// For http transport, `encoding` should be one of [binary, structured] and will be used to override the outbound +// codec encoding setting. If the transport does not understand the encoding, it will be ignored. +func WithEncoding(ctx context.Context, encoding string) context.Context { + return context.WithValue(ctx, encodingKey, strings.ToLower(encoding)) +} + +// EncodingFrom looks in the given context and returns `target` as a parsed url if found and valid, otherwise nil. +func EncodingFrom(ctx context.Context) string { + c := ctx.Value(encodingKey) + if c != nil { + if s, ok := c.(string); ok && s != "" { + return s + } + } + return "" +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/doc.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/doc.go new file mode 100644 index 000000000000..377cab850fc7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/doc.go @@ -0,0 +1,5 @@ +/* +Package context holds the last resort overrides and fyi objects that can be passed to clients and transports added to +context.Context objects. 
+*/ +package context diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/logger.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/logger.go new file mode 100644 index 000000000000..996f720572ed --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/logger.go @@ -0,0 +1,43 @@ +package context + +import ( + "context" + + "go.uber.org/zap" +) + +// Opaque key type used to store logger +type loggerKeyType struct{} + +var loggerKey = loggerKeyType{} + +// fallbackLogger is the logger is used when there is no logger attached to the context. +var fallbackLogger *zap.SugaredLogger + +func init() { + if logger, err := zap.NewProduction(); err != nil { + // We failed to create a fallback logger. + fallbackLogger = zap.NewNop().Sugar() + } else { + fallbackLogger = logger.Named("fallback").Sugar() + } +} + +// WithLogger returns a new context with the logger injected into the given context. +func WithLogger(ctx context.Context, logger *zap.SugaredLogger) context.Context { + if logger == nil { + return context.WithValue(ctx, loggerKey, fallbackLogger) + } + return context.WithValue(ctx, loggerKey, logger) +} + +// LoggerFrom returns the logger stored in context. 
+func LoggerFrom(ctx context.Context) *zap.SugaredLogger { + l := ctx.Value(loggerKey) + if l != nil { + if logger, ok := l.(*zap.SugaredLogger); ok { + return logger + } + } + return fallbackLogger +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/data_content_encoding.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/data_content_encoding.go new file mode 100644 index 000000000000..180102ee3fa1 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/data_content_encoding.go @@ -0,0 +1,11 @@ +package cloudevents + +const ( + Base64 = "base64" +) + +// StringOfBase64 returns a string pointer to "Base64" +func StringOfBase64() *string { + a := Base64 + return &a +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/codec.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/codec.go new file mode 100644 index 000000000000..41425c21ff54 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/codec.go @@ -0,0 +1,93 @@ +package datacodec + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json" + "github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml" + "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" +) + +// Decoder is the expected function signature for decoding `in` to `out`. What +// `in` is could be decoder dependent. For example, `in` could be bytes, or a +// base64 string. +type Decoder func(ctx context.Context, in, out interface{}) error + +// Encoder is the expected function signature for encoding `in` to bytes. +// Returns an error if the encoder has an issue encoding `in`. 
+type Encoder func(ctx context.Context, in interface{}) ([]byte, error) + +var decoder map[string]Decoder +var encoder map[string]Encoder + +func init() { + decoder = make(map[string]Decoder, 10) + encoder = make(map[string]Encoder, 10) + + AddDecoder("", json.Decode) + AddDecoder("application/json", json.Decode) + AddDecoder("text/json", json.Decode) + AddDecoder("application/xml", xml.Decode) + AddDecoder("text/xml", xml.Decode) + + AddEncoder("", json.Encode) + AddEncoder("application/json", json.Encode) + AddEncoder("text/json", json.Encode) + AddEncoder("application/xml", xml.Encode) + AddEncoder("text/xml", xml.Encode) +} + +// AddDecoder registers a decoder for a given content type. The codecs will use +// these to decode the data payload from a cloudevent.Event object. +func AddDecoder(contentType string, fn Decoder) { + decoder[contentType] = fn +} + +// AddEncoder registers an encoder for a given content type. The codecs will +// use these to encode the data payload for a cloudevent.Event object. +func AddEncoder(contentType string, fn Encoder) { + encoder[contentType] = fn +} + +// Decode looks up and invokes the decoder registered for the given content +// type. An error is returned if no decoder is registered for the given +// content type. +func Decode(ctx context.Context, contentType string, in, out interface{}) error { + _, r := observability.NewReporter(ctx, reportDecode) + err := obsDecode(ctx, contentType, in, out) + if err != nil { + r.Error() + } else { + r.OK() + } + return err +} + +func obsDecode(ctx context.Context, contentType string, in, out interface{}) error { + if fn, ok := decoder[contentType]; ok { + return fn(ctx, in, out) + } + return fmt.Errorf("[decode] unsupported content type: %q", contentType) +} + +// Encode looks up and invokes the encoder registered for the given content +// type. An error is returned if no encoder is registered for the given +// content type. 
+func Encode(ctx context.Context, contentType string, in interface{}) ([]byte, error) { + _, r := observability.NewReporter(ctx, reportEncode) + b, err := obsEncode(ctx, contentType, in) + if err != nil { + r.Error() + } else { + r.OK() + } + return b, err +} + +func obsEncode(ctx context.Context, contentType string, in interface{}) ([]byte, error) { + if fn, ok := encoder[contentType]; ok { + return fn(ctx, in) + } + return nil, fmt.Errorf("[encode] unsupported content type: %q", contentType) +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/doc.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/doc.go new file mode 100644 index 000000000000..9e401534e274 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/doc.go @@ -0,0 +1,5 @@ +/* +Package datacodec holds the data codec registry and adds known encoders and decoders supporting media types such as +`application/json` and `application/xml`. +*/ +package datacodec diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/data.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/data.go new file mode 100644 index 000000000000..926c344fed01 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/data.go @@ -0,0 +1,97 @@ +package json + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strconv" + + "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" +) + +// Decode takes `in` as []byte, or base64 string, normalizes in to unquoted and +// base64 decoded []byte if required, and then attempts to use json.Unmarshal +// to convert those bytes to `out`. Returns and error if this process fails. 
+func Decode(ctx context.Context, in, out interface{}) error { + _, r := observability.NewReporter(ctx, reportDecode) + err := obsDecode(ctx, in, out) + if err != nil { + r.Error() + } else { + r.OK() + } + return err +} + +func obsDecode(ctx context.Context, in, out interface{}) error { + if in == nil { + return nil + } + if out == nil { + return fmt.Errorf("out is nil") + } + + b, ok := in.([]byte) // TODO: I think there is fancy marshaling happening here. Fix with reflection? + if !ok { + var err error + b, err = json.Marshal(in) + if err != nil { + return fmt.Errorf("[json] failed to marshal in: %s", err.Error()) + } + } + + // TODO: the spec says json could be just data... At the moment we expect wrapped. + if len(b) > 1 && (b[0] == byte('"') || (b[0] == byte('\\') && b[1] == byte('"'))) { + s, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("[json] failed to unquote in: %s", err.Error()) + } + if len(s) > 0 && (s[0] == '{' || s[0] == '[') { + // looks like json, use it + b = []byte(s) + } + } + + if err := json.Unmarshal(b, out); err != nil { + return fmt.Errorf("[json] found bytes \"%s\", but failed to unmarshal: %s", string(b), err.Error()) + } + return nil +} + +// Encode attempts to json.Marshal `in` into bytes. Encode will inspect `in` +// and returns `in` unmodified if it is detected that `in` is already a []byte; +// Or json.Marshal errors. +func Encode(ctx context.Context, in interface{}) ([]byte, error) { + _, r := observability.NewReporter(ctx, reportEncode) + b, err := obsEncode(ctx, in) + if err != nil { + r.Error() + } else { + r.OK() + } + return b, err +} + +func obsEncode(ctx context.Context, in interface{}) ([]byte, error) { + if in == nil { + return nil, nil + } + + it := reflect.TypeOf(in) + switch it.Kind() { + case reflect.Slice: + if it.Elem().Kind() == reflect.Uint8 { + + if b, ok := in.([]byte); ok && len(b) > 0 { + // check to see if it is a pre-encoded byte string. 
+ if b[0] == byte('"') || b[0] == byte('{') || b[0] == byte('[') { + return b, nil + } + } + + } + } + + return json.Marshal(in) +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/doc.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/doc.go new file mode 100644 index 000000000000..86772c2e3393 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/doc.go @@ -0,0 +1,4 @@ +/* +Package json holds the encoder/decoder implementation for `application/json`. +*/ +package json diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/observability.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/observability.go new file mode 100644 index 000000000000..d38a4b7d250d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/observability.go @@ -0,0 +1,63 @@ +package json + +import ( + "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" +) + +var ( + // LatencyMs measures the latency in milliseconds for the CloudEvents json + // data codec methods. + LatencyMs = stats.Float64("cloudevents.io/sdk-go/datacodec/json/latency", "The latency in milliseconds for the CloudEvents json data codec methods.", "ms") +) + +var ( + // LatencyView is an OpenCensus view that shows data codec json method latency. 
+ LatencyView = &view.View{ + Name: "datacodec/json/latency", + Measure: LatencyMs, + Description: "The distribution of latency inside of the json data codec for CloudEvents.", + Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), + TagKeys: observability.LatencyTags(), + } +) + +type observed int32 + +// Adheres to Observable +var _ observability.Observable = observed(0) + +const ( + reportEncode observed = iota + reportDecode +) + +// TraceName implements Observable.TraceName +func (o observed) TraceName() string { + switch o { + case reportEncode: + return "datacodec/json/encode" + case reportDecode: + return "datacodec/json/decode" + default: + return "datacodec/json/unknown" + } +} + +// MethodName implements Observable.MethodName +func (o observed) MethodName() string { + switch o { + case reportEncode: + return "encode" + case reportDecode: + return "decode" + default: + return "unknown" + } +} + +// LatencyMs implements Observable.LatencyMs +func (o observed) LatencyMs() *stats.Float64Measure { + return LatencyMs +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/observability.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/observability.go new file mode 100644 index 000000000000..a51e05eb9fc4 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/observability.go @@ -0,0 +1,63 @@ +package datacodec + +import ( + "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" +) + +var ( + // LatencyMs measures the latency in milliseconds for the CloudEvents generic + // codec data methods. + LatencyMs = stats.Float64("cloudevents.io/sdk-go/datacodec/latency", "The latency in milliseconds for the CloudEvents generic data codec methods.", "ms") +) + +var ( + // LatencyView is an OpenCensus view that shows data codec method latency. 
+ LatencyView = &view.View{ + Name: "datacodec/latency", + Measure: LatencyMs, + Description: "The distribution of latency inside of the generic data codec for CloudEvents.", + Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), + TagKeys: observability.LatencyTags(), + } +) + +type observed int32 + +// Adheres to Observable +var _ observability.Observable = observed(0) + +const ( + reportEncode observed = iota + reportDecode +) + +// TraceName implements Observable.TraceName +func (o observed) TraceName() string { + switch o { + case reportEncode: + return "datacodec/encode" + case reportDecode: + return "datacodec/decode" + default: + return "datacodec/unknown" + } +} + +// MethodName implements Observable.MethodName +func (o observed) MethodName() string { + switch o { + case reportEncode: + return "encode" + case reportDecode: + return "decode" + default: + return "unknown" + } +} + +// LatencyMs implements Observable.LatencyMs +func (o observed) LatencyMs() *stats.Float64Measure { + return LatencyMs +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/data.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/data.go new file mode 100644 index 000000000000..6339e4443378 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/data.go @@ -0,0 +1,90 @@ +package xml + +import ( + "context" + "encoding/base64" + "encoding/xml" + "fmt" + "strconv" + + "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" +) + +// Decode takes `in` as []byte, or base64 string, normalizes in to unquoted and +// base64 decoded []byte if required, and then attempts to use xml.Unmarshal +// to convert those bytes to `out`. Returns and error if this process fails. 
+func Decode(ctx context.Context, in, out interface{}) error { + _, r := observability.NewReporter(ctx, reportDecode) + err := obsDecode(ctx, in, out) + if err != nil { + r.Error() + } else { + r.OK() + } + return err +} + +func obsDecode(ctx context.Context, in, out interface{}) error { + if in == nil { + return nil + } + + b, ok := in.([]byte) + if !ok { + var err error + b, err = xml.Marshal(in) + if err != nil { + return fmt.Errorf("[xml] failed to marshal in: %s", err.Error()) + } + } + + // If the message is encoded as a base64 block as a string, we need to + // decode that first before trying to unmarshal the bytes + if len(b) > 1 && (b[0] == byte('"') || (b[0] == byte('\\') && b[1] == byte('"'))) { + s, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("[xml] failed to unquote quoted data: %s", err.Error()) + } + if len(s) > 0 && s[0] == '<' { + // looks like xml, use it + b = []byte(s) + } else if len(s) > 0 { + // looks like base64, decode + bs, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return fmt.Errorf("[xml] failed to decode base64 encoded string: %s", err.Error()) + } + b = bs + } + } + + if err := xml.Unmarshal(b, out); err != nil { + return fmt.Errorf("[xml] found bytes, but failed to unmarshal: %s %s", err.Error(), string(b)) + } + return nil +} + +// Encode attempts to xml.Marshal `in` into bytes. Encode will inspect `in` +// and returns `in` unmodified if it is detected that `in` is already a []byte; +// Or xml.Marshal errors. +func Encode(ctx context.Context, in interface{}) ([]byte, error) { + _, r := observability.NewReporter(ctx, reportEncode) + b, err := obsEncode(ctx, in) + if err != nil { + r.Error() + } else { + r.OK() + } + return b, err +} + +func obsEncode(ctx context.Context, in interface{}) ([]byte, error) { + if b, ok := in.([]byte); ok { + // check to see if it is a pre-encoded byte string. 
+ if len(b) > 0 && b[0] == byte('"') { + return b, nil + } + } + + return xml.Marshal(in) +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/doc.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/doc.go new file mode 100644 index 000000000000..d90b7c444daf --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/doc.go @@ -0,0 +1,4 @@ +/* +Package xml holds the encoder/decoder implementation for `application/xml`. +*/ +package xml diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/observability.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/observability.go new file mode 100644 index 000000000000..31b0bb26998f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/observability.go @@ -0,0 +1,63 @@ +package xml + +import ( + "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" +) + +var ( + // LatencyMs measures the latency in milliseconds for the CloudEvents xml data + // codec methods. + LatencyMs = stats.Float64("cloudevents.io/sdk-go/datacodec/xml/latency", "The latency in milliseconds for the CloudEvents xml data codec methods.", "ms") +) + +var ( + // LatencyView is an OpenCensus view that shows data codec xml method latency. 
+ LatencyView = &view.View{ + Name: "datacodec/xml/latency", + Measure: LatencyMs, + Description: "The distribution of latency inside of the xml data codec for CloudEvents.", + Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), + TagKeys: observability.LatencyTags(), + } +) + +type observed int32 + +// Adheres to Observable +var _ observability.Observable = observed(0) + +const ( + reportEncode observed = iota + reportDecode +) + +// TraceName implements Observable.TraceName +func (o observed) TraceName() string { + switch o { + case reportEncode: + return "datacodec/xml/encode" + case reportDecode: + return "datacodec/xml/decode" + default: + return "datacodec/xml/unknown" + } +} + +// MethodName implements Observable.MethodName +func (o observed) MethodName() string { + switch o { + case reportEncode: + return "encode" + case reportDecode: + return "decode" + default: + return "unknown" + } +} + +// LatencyMs implements Observable.LatencyMs +func (o observed) LatencyMs() *stats.Float64Measure { + return LatencyMs +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/doc.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/doc.go new file mode 100644 index 000000000000..cc2201da9150 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/doc.go @@ -0,0 +1,4 @@ +/* +Package cloudevents provides primitives to work with CloudEvents specification: https://github.com/cloudevents/spec. +*/ +package cloudevents diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event.go new file mode 100644 index 000000000000..9b0b9036781c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event.go @@ -0,0 +1,97 @@ +package cloudevents + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +// Event represents the canonical representation of a CloudEvent. 
+type Event struct { + Context EventContext + Data interface{} + DataEncoded bool +} + +const ( + defaultEventVersion = CloudEventsVersionV02 +) + +// New returns a new Event, an optional version can be passed to change the +// default spec version from 0.2 to the provided version. +func New(version ...string) Event { + specVersion := defaultEventVersion // TODO: should there be a default? or set a default? + if len(version) >= 1 { + specVersion = version[0] + } + e := &Event{} + e.SetSpecVersion(specVersion) + return *e +} + +// ExtensionAs returns Context.ExtensionAs(name, obj) +func (e Event) ExtensionAs(name string, obj interface{}) error { + return e.Context.ExtensionAs(name, obj) +} + +// Validate performs a spec based validation on this event. +// Validation is dependent on the spec version specified in the event context. +func (e Event) Validate() error { + if e.Context == nil { + return fmt.Errorf("every event conforming to the CloudEvents specification MUST include a context") + } + + if err := e.Context.Validate(); err != nil { + return err + } + + // TODO: validate data. + + return nil +} + +// String returns a pretty-printed representation of the Event. 
+func (e Event) String() string { + b := strings.Builder{} + + b.WriteString("Validation: ") + + valid := e.Validate() + if valid == nil { + b.WriteString("valid\n") + } else { + b.WriteString("invalid\n") + } + if valid != nil { + b.WriteString(fmt.Sprintf("Validation Error: \n%s\n", valid.Error())) + } + + b.WriteString(e.Context.String()) + + if e.Data != nil { + b.WriteString("Data,\n ") + if strings.HasPrefix(e.DataContentType(), ApplicationJSON) { + var prettyJSON bytes.Buffer + + data, ok := e.Data.([]byte) + if !ok { + var err error + data, err = json.Marshal(e.Data) + if err != nil { + data = []byte(err.Error()) + } + } + err := json.Indent(&prettyJSON, data, " ", " ") + if err != nil { + b.Write(e.Data.([]byte)) + } else { + b.Write(prettyJSON.Bytes()) + } + } else { + b.Write(e.Data.([]byte)) + } + b.WriteString("\n") + } + return b.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_data.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_data.go new file mode 100644 index 000000000000..9e4cb9d24718 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_data.go @@ -0,0 +1,99 @@ +package cloudevents + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "strconv" + + "github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec" +) + +// Data is special. Break it out into it's own file. 
+ +// SetData implements EventWriter.SetData +func (e *Event) SetData(obj interface{}) error { + data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) + if err != nil { + return err + } + if e.DataContentEncoding() == Base64 { + buf := make([]byte, base64.StdEncoding.EncodedLen(len(data))) + base64.StdEncoding.Encode(buf, data) + e.Data = string(buf) + } else { + e.Data = data + } + e.DataEncoded = true + return nil +} + +func (e *Event) DataBytes() ([]byte, error) { + if !e.DataEncoded { + if err := e.SetData(e.Data); err != nil { + return nil, err + } + } + + b, ok := e.Data.([]byte) + if !ok { + if s, ok := e.Data.(string); ok { + b = []byte(s) + } else { + // No data. + return []byte(nil), nil + } + } + return b, nil +} + +const ( + quotes = `"'` +) + +// DataAs attempts to populate the provided data object with the event payload. +// data should be a pointer type. +func (e Event) DataAs(data interface{}) error { // TODO: Clean this function up + if e.Data == nil { + return nil + } + obj, ok := e.Data.([]byte) + if !ok { + if s, ok := e.Data.(string); ok { + obj = []byte(s) + } else { + return errors.New("data was not a byte slice or string") + } + } + if len(obj) == 0 { + // No data. + return nil + } + if e.Context.GetDataContentEncoding() == Base64 { + var bs []byte + // test to see if we need to unquote the data. 
+ if obj[0] == quotes[0] || obj[0] == quotes[1] { + str, err := strconv.Unquote(string(obj)) + if err != nil { + return err + } + bs = []byte(str) + } else { + bs = obj + } + + buf := make([]byte, base64.StdEncoding.DecodedLen(len(bs))) + n, err := base64.StdEncoding.Decode(buf, bs) + if err != nil { + return fmt.Errorf("failed to decode data from base64: %s", err.Error()) + } + obj = buf[:n] + } + + mediaType, err := e.Context.GetDataMediaType() + if err != nil { + return err + } + return datacodec.Decode(context.Background(), mediaType, obj, data) +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_interface.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_interface.go new file mode 100644 index 000000000000..8ca52b04d803 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_interface.go @@ -0,0 +1,75 @@ +package cloudevents + +import ( + "time" +) + +// EventWriter is the interface for reading through an event from attributes. +type EventReader interface { + // SpecVersion returns event.Context.GetSpecVersion(). + SpecVersion() string + // Type returns event.Context.GetType(). + Type() string + // Source returns event.Context.GetSource(). + Source() string + // Subject returns event.Context.GetSubject(). + Subject() string + // ID returns event.Context.GetID(). + ID() string + // Time returns event.Context.GetTime(). + Time() time.Time + // SchemaURL returns event.Context.GetSchemaURL(). + SchemaURL() string + // DataContentType returns event.Context.GetDataContentType(). + DataContentType() string + // DataMediaType returns event.Context.GetDataMediaType(). + DataMediaType() string + // DataContentEncoding returns event.Context.GetDataContentEncoding(). + DataContentEncoding() string + + // Extension Attributes + + // Extensions returns the event.Context.GetExtensions(). + Extensions() map[string]interface{} + + // ExtensionAs returns event.Context.ExtensionAs(name, obj). 
+ ExtensionAs(string, interface{}) error + + // Data Attribute + + // ExtensionAs returns event.Context.ExtensionAs(name, obj). + DataAs(interface{}) error +} + +// EventWriter is the interface for writing through an event onto attributes. +// If an error is thrown by a sub-component, EventWriter panics. +type EventWriter interface { + // Context Attributes + + // SetSpecVersion performs event.Context.SetSpecVersion. + SetSpecVersion(string) + // SetType performs event.Context.SetType. + SetType(string) + // SetSource performs event.Context.SetSource. + SetSource(string) + // SetSubject( performs event.Context.SetSubject. + SetSubject(string) + // SetID performs event.Context.SetID. + SetID(string) + // SetTime performs event.Context.SetTime. + SetTime(time.Time) + // SetSchemaURL performs event.Context.SetSchemaURL. + SetSchemaURL(string) + // SetDataContentType performs event.Context.SetDataContentType. + SetDataContentType(string) + // SetDataContentEncoding performs event.Context.SetDataContentEncoding. + SetDataContentEncoding(string) + + // Extension Attributes + + // SetExtension performs event.Context.SetExtension. + SetExtension(string, interface{}) + + // SetData encodes the given payload with the current encoding settings. + SetData(interface{}) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_marshal.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_marshal.go new file mode 100644 index 000000000000..5e2c1602efa3 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_marshal.go @@ -0,0 +1,281 @@ +package cloudevents + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + + "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" +) + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. 
+func (e Event) MarshalJSON() ([]byte, error) { + _, r := observability.NewReporter(context.Background(), eventJSONObserved{o: reportMarshal, v: e.SpecVersion()}) + + if err := e.Validate(); err != nil { + r.Error() + return nil, err + } + + b, err := JsonEncode(e) + + // Report the observable + if err != nil { + r.Error() + return nil, err + } else { + r.OK() + } + + return b, nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. +func (e *Event) UnmarshalJSON(b []byte) error { + raw := make(map[string]json.RawMessage) + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + + version := versionFromRawMessage(raw) + + _, r := observability.NewReporter(context.Background(), eventJSONObserved{o: reportUnmarshal, v: version}) + + var err error + switch version { + case CloudEventsVersionV01: + err = e.JsonDecodeV01(b, raw) + case CloudEventsVersionV02: + err = e.JsonDecodeV02(b, raw) + case CloudEventsVersionV03: + err = e.JsonDecodeV03(b, raw) + default: + return fmt.Errorf("unnknown spec version: %q", version) + } + + // Report the observable + if err != nil { + r.Error() + return err + } else { + r.OK() + } + return nil +} + +func versionFromRawMessage(raw map[string]json.RawMessage) string { + // v0.1 + if v, ok := raw["cloudEventsVersion"]; ok { + var version string + if err := json.Unmarshal(v, &version); err != nil { + return "" + } + return version + } + + // v0.2 and after + if v, ok := raw["specversion"]; ok { + var version string + if err := json.Unmarshal(v, &version); err != nil { + return "" + } + return version + } + return "" +} + +// JsonEncode +func JsonEncode(e Event) ([]byte, error) { + if e.DataContentType() == "" { + e.SetDataContentType(ApplicationJSON) + } + data, err := e.DataBytes() + if err != nil { + return nil, err + } + return jsonEncode(e.Context, data) +} + +func jsonEncode(ctx EventContextReader, data []byte) ([]byte, error) { + var b 
map[string]json.RawMessage + var err error + + if ctx.GetSpecVersion() == CloudEventsVersionV01 { + b, err = marshalEventLegacy(ctx) + } else { + b, err = marshalEvent(ctx, ctx.GetExtensions()) + } + if err != nil { + return nil, err + } + + if data != nil { + // data is passed in as an encoded []byte. That slice might be any + // number of things but for json encoding of the envelope all we care + // is if the payload is either a string or a json object. If it is a + // json object, it can be inserted into the body without modification. + // Otherwise we need to quote it if not already quoted. + mediaType, err := ctx.GetDataMediaType() + if err != nil { + return nil, err + } + isBase64 := ctx.GetDataContentEncoding() == Base64 + isJson := mediaType == "" || mediaType == ApplicationJSON || mediaType == TextJSON + // TODO(#60): we do not support json values at the moment, only objects and lists. + if isJson && !isBase64 { + b["data"] = data + } else if data[0] != byte('"') { + b["data"] = []byte(strconv.QuoteToASCII(string(data))) + } else { + // already quoted + b["data"] = data + } + } + + body, err := json.Marshal(b) + if err != nil { + return nil, err + } + + return body, nil +} + +// JsonDecodeV01 takes in the byte representation of a version 0.1 structured json CloudEvent and returns a +// cloudevent.Event or an error if there are parsing errors. +func (e *Event) JsonDecodeV01(body []byte, raw map[string]json.RawMessage) error { + ec := EventContextV01{} + if err := json.Unmarshal(body, &ec); err != nil { + return err + } + + var data interface{} + if d, ok := raw["data"]; ok { + data = []byte(d) + } + + e.Context = &ec + e.Data = data + e.DataEncoded = data != nil + + return nil +} + +// JsonDecodeV02 takes in the byte representation of a version 0.2 structured json CloudEvent and returns a +// cloudevent.Event or an error if there are parsing errors. 
+func (e *Event) JsonDecodeV02(body []byte, raw map[string]json.RawMessage) error { + ec := EventContextV02{} + if err := json.Unmarshal(body, &ec); err != nil { + return err + } + + // TODO: could use reflection to get these. + delete(raw, "specversion") + delete(raw, "type") + delete(raw, "source") + delete(raw, "id") + delete(raw, "time") + delete(raw, "schemaurl") + delete(raw, "contenttype") + + var data interface{} + if d, ok := raw["data"]; ok { + data = []byte(d) + } + delete(raw, "data") + + if len(raw) > 0 { + extensions := make(map[string]interface{}, len(raw)) + for k, v := range raw { + extensions[k] = v + } + ec.Extensions = extensions + } + + e.Context = &ec + e.Data = data + e.DataEncoded = data != nil + + return nil +} + +// JsonDecodeV03 takes in the byte representation of a version 0.3 structured json CloudEvent and returns a +// cloudevent.Event or an error if there are parsing errors. +func (e *Event) JsonDecodeV03(body []byte, raw map[string]json.RawMessage) error { + ec := EventContextV03{} + if err := json.Unmarshal(body, &ec); err != nil { + return err + } + + // TODO: could use reflection to get these. 
+ delete(raw, "specversion") + delete(raw, "type") + delete(raw, "source") + delete(raw, "subject") + delete(raw, "id") + delete(raw, "time") + delete(raw, "schemaurl") + delete(raw, "datacontenttype") + delete(raw, "datacontentencoding") + + var data interface{} + if d, ok := raw["data"]; ok { + data = []byte(d) + } + delete(raw, "data") + + if len(raw) > 0 { + extensions := make(map[string]interface{}, len(raw)) + for k, v := range raw { + extensions[k] = v + } + ec.Extensions = extensions + } + + e.Context = &ec + e.Data = data + e.DataEncoded = data != nil + + return nil +} + +func marshalEventLegacy(event interface{}) (map[string]json.RawMessage, error) { + b, err := json.Marshal(event) + if err != nil { + return nil, err + } + + brm := map[string]json.RawMessage{} + if err := json.Unmarshal(b, &brm); err != nil { + return nil, err + } + + return brm, nil +} + +func marshalEvent(event interface{}, extensions map[string]interface{}) (map[string]json.RawMessage, error) { + b, err := json.Marshal(event) + if err != nil { + return nil, err + } + + brm := map[string]json.RawMessage{} + if err := json.Unmarshal(b, &brm); err != nil { + return nil, err + } + + for k, v := range extensions { + vb, err := json.Marshal(v) + if err != nil { + return nil, err + } + // Don't overwrite spec keys. 
+ if _, ok := brm[k]; !ok { + brm[k] = vb + } + } + + return brm, nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_observability.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_observability.go new file mode 100644 index 000000000000..bce63f5c600f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_observability.go @@ -0,0 +1,94 @@ +package cloudevents + +import ( + "fmt" + + "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" +) + +var ( + // EventMarshalLatencyMs measures the latency in milliseconds for the + // CloudEvents.Event marshal/unmarshalJSON methods. + EventMarshalLatencyMs = stats.Float64( + "cloudevents.io/sdk-go/event/json/latency", + "The latency in milliseconds of (un)marshalJSON methods for CloudEvents.Event.", + "ms") +) + +var ( + // LatencyView is an OpenCensus view that shows CloudEvents.Event (un)marshalJSON method latency. + EventMarshalLatencyView = &view.View{ + Name: "event/json/latency", + Measure: EventMarshalLatencyMs, + Description: "The distribution of latency inside of (un)marshalJSON methods for CloudEvents.Event.", + Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), + TagKeys: observability.LatencyTags(), + } +) + +type observed int32 + +// Adheres to Observable +var _ observability.Observable = observed(0) + +const ( + reportMarshal observed = iota + reportUnmarshal +) + +// TraceName implements Observable.TraceName +func (o observed) TraceName() string { + switch o { + case reportMarshal: + return "cloudevents/event/marshaljson" + case reportUnmarshal: + return "cloudevents/event/unmarshaljson" + default: + return "cloudevents/event/unknwown" + } +} + +// MethodName implements Observable.MethodName +func (o observed) MethodName() string { + switch o { + case reportMarshal: + return "marshaljson" + case reportUnmarshal: + return "unmarshaljson" + default: + return 
"unknown" + } +} + +// LatencyMs implements Observable.LatencyMs +func (o observed) LatencyMs() *stats.Float64Measure { + return EventMarshalLatencyMs +} + +// eventJSONObserved is a wrapper to append version to observed. +type eventJSONObserved struct { + // Method + o observed + // Version + v string +} + +// Adheres to Observable +var _ observability.Observable = (*eventJSONObserved)(nil) + +// TraceName implements Observable.TraceName +func (c eventJSONObserved) TraceName() string { + return fmt.Sprintf("%s/%s", c.o.TraceName(), c.v) +} + +// MethodName implements Observable.MethodName +func (c eventJSONObserved) MethodName() string { + return fmt.Sprintf("%s/%s", c.o.MethodName(), c.v) +} + +// LatencyMs implements Observable.LatencyMs +func (c eventJSONObserved) LatencyMs() *stats.Float64Measure { + return c.o.LatencyMs() +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_reader.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_reader.go new file mode 100644 index 000000000000..a5be4ecf8207 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_reader.go @@ -0,0 +1,98 @@ +package cloudevents + +import ( + "time" +) + +var _ EventReader = (*Event)(nil) + +// SpecVersion implements EventReader.SpecVersion +func (e Event) SpecVersion() string { + if e.Context != nil { + return e.Context.GetSpecVersion() + } + return "" +} + +// Type implements EventReader.Type +func (e Event) Type() string { + if e.Context != nil { + return e.Context.GetType() + } + return "" +} + +// Source implements EventReader.Source +func (e Event) Source() string { + if e.Context != nil { + return e.Context.GetSource() + } + return "" +} + +// Subject implements EventReader.Subject +func (e Event) Subject() string { + if e.Context != nil { + return e.Context.GetSubject() + } + return "" +} + +// ID implements EventReader.ID +func (e Event) ID() string { + if e.Context != nil { + return e.Context.GetID() + } + return "" +} + +// 
Time implements EventReader.Time +func (e Event) Time() time.Time { + if e.Context != nil { + return e.Context.GetTime() + } + return time.Time{} +} + +// SchemaURL implements EventReader.SchemaURL +func (e Event) SchemaURL() string { + if e.Context != nil { + return e.Context.GetSchemaURL() + } + return "" +} + +// DataContentType implements EventReader.DataContentType +func (e Event) DataContentType() string { + if e.Context != nil { + return e.Context.GetDataContentType() + } + return "" +} + +// DataMediaType returns the parsed DataMediaType of the event. If parsing +// fails, the empty string is returned. To retrieve the parsing error, use +// `Context.GetDataMediaType` instead. +func (e Event) DataMediaType() string { + if e.Context != nil { + mediaType, _ := e.Context.GetDataMediaType() + return mediaType + } + return "" +} + +// DataContentEncoding implements EventReader.DataContentEncoding +func (e Event) DataContentEncoding() string { + if e.Context != nil { + return e.Context.GetDataContentEncoding() + } + return "" +} + +// DataContentEncoding implements EventReader.DataContentEncoding +func (e Event) Extensions() map[string]interface{} { + if e.Context != nil { + return e.Context.GetExtensions() + } + return map[string]interface{}(nil) +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_response.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_response.go new file mode 100644 index 000000000000..0e5f7ce75d43 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_response.go @@ -0,0 +1,37 @@ +package cloudevents + +// EventResponse represents the canonical representation of a Response to a +// CloudEvent from a receiver. Response implementation is Transport dependent. +type EventResponse struct { + Status int + Event *Event + Reason string + // Context is transport specific struct to allow for controlling transport + // response details. + // For example, see http.TransportResponseContext. 
+ Context interface{} +} + +// RespondWith sets up the instance of EventResponse to be set with status and +// an event. Response implementation is Transport dependent. +func (e *EventResponse) RespondWith(status int, event *Event) { + if e == nil { + // if nil, response not supported + return + } + e.Status = status + if event != nil { + e.Event = event + } +} + +// Error sets the instance of EventResponse to be set with an error code and +// reason string. Response implementation is Transport dependent. +func (e *EventResponse) Error(status int, reason string) { + if e == nil { + // if nil, response not supported + return + } + e.Status = status + e.Reason = reason +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_writer.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_writer.go new file mode 100644 index 000000000000..ce5b3e876734 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_writer.go @@ -0,0 +1,92 @@ +package cloudevents + +import ( + "fmt" + "time" +) + +var _ EventWriter = (*Event)(nil) + +// SetSpecVersion implements EventWriter.SetSpecVersion +func (e *Event) SetSpecVersion(v string) { + if e.Context == nil { + switch v { + case CloudEventsVersionV01: + e.Context = EventContextV01{}.AsV01() + case CloudEventsVersionV02: + e.Context = EventContextV02{}.AsV02() + case CloudEventsVersionV03: + e.Context = EventContextV03{}.AsV03() + default: + panic(fmt.Errorf("a valid spec version is required: [%s, %s, %s]", + CloudEventsVersionV01, CloudEventsVersionV02, CloudEventsVersionV03)) + } + return + } + if err := e.Context.SetSpecVersion(v); err != nil { + panic(err) + } +} + +// SetType implements EventWriter.SetType +func (e *Event) SetType(t string) { + if err := e.Context.SetType(t); err != nil { + panic(err) + } +} + +// SetSource implements EventWriter.SetSource +func (e *Event) SetSource(s string) { + if err := e.Context.SetSource(s); err != nil { + panic(err) + } +} + +// SetSubject 
implements EventWriter.SetSubject +func (e *Event) SetSubject(s string) { + if err := e.Context.SetSubject(s); err != nil { + panic(err) + } +} + +// SetID implements EventWriter.SetID +func (e *Event) SetID(id string) { + if err := e.Context.SetID(id); err != nil { + panic(err) + } +} + +// SetTime implements EventWriter.SetTime +func (e *Event) SetTime(t time.Time) { + if err := e.Context.SetTime(t); err != nil { + panic(err) + } +} + +// SetSchemaURL implements EventWriter.SetSchemaURL +func (e *Event) SetSchemaURL(s string) { + if err := e.Context.SetSchemaURL(s); err != nil { + panic(err) + } +} + +// SetDataContentType implements EventWriter.SetDataContentType +func (e *Event) SetDataContentType(ct string) { + if err := e.Context.SetDataContentType(ct); err != nil { + panic(err) + } +} + +// SetDataContentEncoding implements EventWriter.SetDataContentEncoding +func (e *Event) SetDataContentEncoding(enc string) { + if err := e.Context.SetDataContentEncoding(enc); err != nil { + panic(err) + } +} + +// SetDataContentEncoding implements EventWriter.SetDataContentEncoding +func (e *Event) SetExtension(name string, obj interface{}) { + if err := e.Context.SetExtension(name, obj); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext.go new file mode 100644 index 000000000000..92ad1f729566 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext.go @@ -0,0 +1,108 @@ +package cloudevents + +import "time" + +// EventContextReader are the methods required to be a reader of context +// attributes. +type EventContextReader interface { + // GetSpecVersion returns the native CloudEvents Spec version of the event + // context. + GetSpecVersion() string + // GetType returns the CloudEvents type from the context. + GetType() string + // GetSource returns the CloudEvents source from the context. 
+ GetSource() string + // GetSubject returns the CloudEvents subject from the context. + GetSubject() string + // GetID returns the CloudEvents ID from the context. + GetID() string + // GetTime returns the CloudEvents creation time from the context. + GetTime() time.Time + // GetSchemaURL returns the CloudEvents schema URL (if any) from the + // context. + GetSchemaURL() string + // GetDataContentType returns content type on the context. + GetDataContentType() string + // GetDataContentEncoding returns content encoding on the context. + GetDataContentEncoding() string + + // GetDataMediaType returns the MIME media type for encoded data, which is + // needed by both encoding and decoding. This is a processed form of + // GetDataContentType and it may return an error. + GetDataMediaType() (string, error) + + // ExtensionAs populates the given interface with the CloudEvents extension + // of the given name from the extension attributes. It returns an error if + // the extension does not exist, the extension's type does not match the + // provided type, or if the type is not a supported. + ExtensionAs(string, interface{}) error + + // GetExtensions returns the full extensions map. + GetExtensions() map[string]interface{} +} + +// EventContextWriter are the methods required to be a writer of context +// attributes. +type EventContextWriter interface { + // SetSpecVersion sets the spec version of the context. + SetSpecVersion(string) error + // SetType sets the type of the context. + SetType(string) error + // SetSource sets the source of the context. + SetSource(string) error + // SetSubject sets the subject of the context. + SetSubject(string) error + // SetID sets the ID of the context. + SetID(string) error + // SetTime sets the time of the context. + SetTime(time time.Time) error + // SetSchemaURL sets the schema url of the context. + SetSchemaURL(string) error + // SetDataContentType sets the data content type of the context. 
+ SetDataContentType(string) error + // SetDataContentEncoding sets the data context encoding of the context. + SetDataContentEncoding(string) error + + // SetExtension sets the given interface onto the extension attributes + // determined by the provided name. + SetExtension(string, interface{}) error +} + +type EventContextConverter interface { + // AsV01 provides a translation from whatever the "native" encoding of the + // CloudEvent was to the equivalent in v0.1 field names, moving fields to or + // from extensions as necessary. + AsV01() *EventContextV01 + + // AsV02 provides a translation from whatever the "native" encoding of the + // CloudEvent was to the equivalent in v0.2 field names, moving fields to or + // from extensions as necessary. + AsV02() *EventContextV02 + + // AsV03 provides a translation from whatever the "native" encoding of the + // CloudEvent was to the equivalent in v0.3 field names, moving fields to or + // from extensions as necessary. + AsV03() *EventContextV03 +} + +// EventContext is conical interface for a CloudEvents Context. +type EventContext interface { + // EventContextConverter allows for conversion between versions. + EventContextConverter + + // EventContextReader adds methods for reading context. + EventContextReader + + // EventContextWriter adds methods for writing to context. + EventContextWriter + + // Validate the event based on the specifics of the CloudEvents spec version + // represented by this event context. + Validate() error + + // Clone clones the event context. + Clone() EventContext + + // String returns a pretty-printed representation of the EventContext. 
+ String() string +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01.go new file mode 100644 index 000000000000..d4f416dd12b6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01.go @@ -0,0 +1,268 @@ +package cloudevents + +import ( + "fmt" + "sort" + "strings" + + "github.com/cloudevents/sdk-go/pkg/cloudevents/types" +) + +const ( + // CloudEventsVersionV01 represents the version 0.1 of the CloudEvents spec. + CloudEventsVersionV01 = "0.1" +) + +// EventContextV01 holds standard metadata about an event. See +// https://github.com/cloudevents/spec/blob/v0.1/spec.md#context-attributes for +// details on these fields. +type EventContextV01 struct { + // The version of the CloudEvents specification used by the event. + CloudEventsVersion string `json:"cloudEventsVersion,omitempty"` + // ID of the event; must be non-empty and unique within the scope of the producer. + EventID string `json:"eventID"` + // Timestamp when the event happened. + EventTime *types.Timestamp `json:"eventTime,omitempty"` + // Type of occurrence which has happened. + EventType string `json:"eventType"` + // The version of the `eventType`; this is producer-specific. + EventTypeVersion *string `json:"eventTypeVersion,omitempty"` + // A link to the schema that the `data` attribute adheres to. + SchemaURL *types.URLRef `json:"schemaURL,omitempty"` + // A MIME (RFC 2046) string describing the media type of `data`. + // TODO: Should an empty string assume `application/json`, or auto-detect the content? + ContentType *string `json:"contentType,omitempty"` + // A URI describing the event producer. + Source types.URLRef `json:"source"` + // Additional metadata without a well-defined structure. 
+ Extensions map[string]interface{} `json:"extensions,omitempty"` +} + +// Adhere to EventContext +var _ EventContext = (*EventContextV01)(nil) + +// ExtensionAs implements EventContextReader.ExtensionAs +func (ec EventContextV01) ExtensionAs(name string, obj interface{}) error { + value, ok := ec.Extensions[name] + if !ok { + return fmt.Errorf("extension %q does not exist", name) + } + // Only support *string for now. + switch v := obj.(type) { + case *string: + if valueAsString, ok := value.(string); ok { + *v = valueAsString + return nil + } else { + return fmt.Errorf("invalid type for extension %q", name) + } + default: + return fmt.Errorf("unknown extension type %T", obj) + } +} + +// SetExtension adds the extension 'name' with value 'value' to the CloudEvents context. +func (ec *EventContextV01) SetExtension(name string, value interface{}) error { + if ec.Extensions == nil { + ec.Extensions = make(map[string]interface{}) + } + if value == nil { + delete(ec.Extensions, name) + } else { + ec.Extensions[name] = value + } + return nil +} + +// Clone implements EventContextConverter.Clone +func (ec EventContextV01) Clone() EventContext { + return ec.AsV01() +} + +// AsV01 implements EventContextConverter.AsV01 +func (ec EventContextV01) AsV01() *EventContextV01 { + ec.CloudEventsVersion = CloudEventsVersionV01 + return &ec +} + +// AsV02 implements EventContextConverter.AsV02 +func (ec EventContextV01) AsV02() *EventContextV02 { + ret := EventContextV02{ + SpecVersion: CloudEventsVersionV02, + Type: ec.EventType, + Source: ec.Source, + ID: ec.EventID, + Time: ec.EventTime, + SchemaURL: ec.SchemaURL, + ContentType: ec.ContentType, + Extensions: make(map[string]interface{}), + } + + // eventTypeVersion was retired in v0.2, so put it in an extension. 
+ if ec.EventTypeVersion != nil { + _ = ret.SetExtension(EventTypeVersionKey, *ec.EventTypeVersion) + } + if ec.Extensions != nil { + for k, v := range ec.Extensions { + ret.Extensions[k] = v + } + } + if len(ret.Extensions) == 0 { + ret.Extensions = nil + } + return &ret +} + +// AsV03 implements EventContextConverter.AsV03 +func (ec EventContextV01) AsV03() *EventContextV03 { + ecv2 := ec.AsV02() + return ecv2.AsV03() +} + +// Validate returns errors based on requirements from the CloudEvents spec. +// For more details, see https://github.com/cloudevents/spec/blob/v0.1/spec.md +func (ec EventContextV01) Validate() error { + errors := []string(nil) + + // eventType + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // SHOULD be prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type. + eventType := strings.TrimSpace(ec.EventType) + if eventType == "" { + errors = append(errors, "eventType: MUST be a non-empty string") + } + + // eventTypeVersion + // Type: String + // Constraints: + // OPTIONAL + // If present, MUST be a non-empty string + if ec.EventTypeVersion != nil { + eventTypeVersion := strings.TrimSpace(*ec.EventTypeVersion) + if eventTypeVersion == "" { + errors = append(errors, "eventTypeVersion: if present, MUST be a non-empty string") + } + } + + // cloudEventsVersion + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + cloudEventsVersion := strings.TrimSpace(ec.CloudEventsVersion) + if cloudEventsVersion == "" { + errors = append(errors, "cloudEventsVersion: MUST be a non-empty string") + } + + // source + // Type: URI + // Constraints: + // REQUIRED + source := strings.TrimSpace(ec.Source.String()) + if source == "" { + errors = append(errors, "source: REQUIRED") + } + + // eventID + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // MUST be unique within the scope of the 
producer + eventID := strings.TrimSpace(ec.EventID) + if eventID == "" { + errors = append(errors, "eventID: MUST be a non-empty string") + + // no way to test "MUST be unique within the scope of the producer" + } + + // eventTime + // Type: Timestamp + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3339 + // --> no need to test this, no way to set the eventTime without it being valid. + + // schemaURL + // Type: URI + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3986 + if ec.SchemaURL != nil { + schemaURL := strings.TrimSpace(ec.SchemaURL.String()) + // empty string is not RFC 3986 compatible. + if schemaURL == "" { + errors = append(errors, "schemaURL: if present, MUST adhere to the format specified in RFC 3986") + } + } + + // contentType + // Type: String per RFC 2046 + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 2046 + if ec.ContentType != nil { + contentType := strings.TrimSpace(*ec.ContentType) + if contentType == "" { + // TODO: need to test for RFC 2046 + errors = append(errors, "contentType: if present, MUST adhere to the format specified in RFC 2046") + } + } + + // extensions + // Type: Map + // Constraints: + // OPTIONAL + // If present, MUST contain at least one entry + if ec.Extensions != nil { + if len(ec.Extensions) == 0 { + errors = append(errors, "extensions: if present, MUST contain at least one entry") + } + } + + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + return nil +} + +// String returns a pretty-printed representation of the EventContext. 
+func (ec EventContextV01) String() string { + b := strings.Builder{} + + b.WriteString("Context Attributes,\n") + + b.WriteString(" cloudEventsVersion: " + ec.CloudEventsVersion + "\n") + b.WriteString(" eventType: " + ec.EventType + "\n") + if ec.EventTypeVersion != nil { + b.WriteString(" eventTypeVersion: " + *ec.EventTypeVersion + "\n") + } + b.WriteString(" source: " + ec.Source.String() + "\n") + b.WriteString(" eventID: " + ec.EventID + "\n") + if ec.EventTime != nil { + b.WriteString(" eventTime: " + ec.EventTime.String() + "\n") + } + if ec.SchemaURL != nil { + b.WriteString(" schemaURL: " + ec.SchemaURL.String() + "\n") + } + if ec.ContentType != nil { + b.WriteString(" contentType: " + *ec.ContentType + "\n") + } + + if ec.Extensions != nil && len(ec.Extensions) > 0 { + b.WriteString("Extensions,\n") + keys := make([]string, 0, len(ec.Extensions)) + for k := range ec.Extensions { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + b.WriteString(fmt.Sprintf(" %s: %v\n", key, ec.Extensions[key])) + } + } + + return b.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_reader.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_reader.go new file mode 100644 index 000000000000..12d46863a9a5 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_reader.go @@ -0,0 +1,90 @@ +package cloudevents + +import ( + "mime" + "time" +) + +// Adhere to EventContextReader +var _ EventContextReader = (*EventContextV01)(nil) + +// GetSpecVersion implements EventContextReader.GetSpecVersion +func (ec EventContextV01) GetSpecVersion() string { + if ec.CloudEventsVersion != "" { + return ec.CloudEventsVersion + } + return CloudEventsVersionV01 +} + +// GetDataContentType implements EventContextReader.GetDataContentType +func (ec EventContextV01) GetDataContentType() string { + if ec.ContentType != nil { + return *ec.ContentType + } + return "" +} + 
+// GetDataMediaType implements EventContextReader.GetDataMediaType +func (ec EventContextV01) GetDataMediaType() (string, error) { + if ec.ContentType != nil { + mediaType, _, err := mime.ParseMediaType(*ec.ContentType) + if err != nil { + return "", err + } + return mediaType, nil + } + return "", nil +} + +// GetType implements EventContextReader.GetType +func (ec EventContextV01) GetType() string { + return ec.EventType +} + +// GetSource implements EventContextReader.GetSource +func (ec EventContextV01) GetSource() string { + return ec.Source.String() +} + +// GetSubject implements EventContextReader.GetSubject +func (ec EventContextV01) GetSubject() string { + var sub string + if err := ec.ExtensionAs(SubjectKey, &sub); err != nil { + return "" + } + return sub +} + +// GetID implements EventContextReader.GetID +func (ec EventContextV01) GetID() string { + return ec.EventID +} + +// GetTime implements EventContextReader.GetTime +func (ec EventContextV01) GetTime() time.Time { + if ec.EventTime != nil { + return ec.EventTime.Time + } + return time.Time{} +} + +// GetSchemaURL implements EventContextReader.GetSchemaURL +func (ec EventContextV01) GetSchemaURL() string { + if ec.SchemaURL != nil { + return ec.SchemaURL.String() + } + return "" +} + +// GetDataContentEncoding implements EventContextReader.GetDataContentEncoding +func (ec EventContextV01) GetDataContentEncoding() string { + var enc string + if err := ec.ExtensionAs(DataContentEncodingKey, &enc); err != nil { + return "" + } + return enc +} + +func (ec EventContextV01) GetExtensions() map[string]interface{} { + return ec.Extensions +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_writer.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_writer.go new file mode 100644 index 000000000000..7c196d939e07 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_writer.go @@ -0,0 +1,104 @@ +package cloudevents + 
+import ( + "errors" + "fmt" + "net/url" + "strings" + "time" + + "github.com/cloudevents/sdk-go/pkg/cloudevents/types" +) + +// Adhere to EventContextWriter +var _ EventContextWriter = (*EventContextV01)(nil) + +// SetSpecVersion implements EventContextWriter.SetSpecVersion +func (ec *EventContextV01) SetSpecVersion(v string) error { + if v != CloudEventsVersionV01 { + return fmt.Errorf("invalid version %q, expecting %q", v, CloudEventsVersionV01) + } + ec.CloudEventsVersion = CloudEventsVersionV01 + return nil +} + +// SetDataContentType implements EventContextWriter.SetDataContentType +func (ec *EventContextV01) SetDataContentType(ct string) error { + ct = strings.TrimSpace(ct) + if ct == "" { + ec.ContentType = nil + } else { + ec.ContentType = &ct + } + return nil +} + +// SetType implements EventContextWriter.SetType +func (ec *EventContextV01) SetType(t string) error { + t = strings.TrimSpace(t) + ec.EventType = t + return nil +} + +// SetSource implements EventContextWriter.SetSource +func (ec *EventContextV01) SetSource(u string) error { + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.Source = types.URLRef{URL: *pu} + return nil +} + +// SetSubject implements EventContextWriter.SetSubject +func (ec *EventContextV01) SetSubject(s string) error { + s = strings.TrimSpace(s) + if s == "" { + return ec.SetExtension(SubjectKey, nil) + } + return ec.SetExtension(SubjectKey, s) +} + +// SetID implements EventContextWriter.SetID +func (ec *EventContextV01) SetID(id string) error { + id = strings.TrimSpace(id) + if id == "" { + return errors.New("event id is required to be a non-empty string") + } + ec.EventID = id + return nil +} + +// SetTime implements EventContextWriter.SetTime +func (ec *EventContextV01) SetTime(t time.Time) error { + if t.IsZero() { + ec.EventTime = nil + } else { + ec.EventTime = &types.Timestamp{Time: t} + } + return nil +} + +// SetSchemaURL implements EventContextWriter.SetSchemaURL +func (ec *EventContextV01) 
SetSchemaURL(u string) error { + u = strings.TrimSpace(u) + if u == "" { + ec.SchemaURL = nil + return nil + } + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.SchemaURL = &types.URLRef{URL: *pu} + return nil +} + +// SetDataContentEncoding implements EventContextWriter.SetDataContentEncoding +func (ec *EventContextV01) SetDataContentEncoding(e string) error { + e = strings.ToLower(strings.TrimSpace(e)) + if e == "" { + return ec.SetExtension(DataContentEncodingKey, nil) + } + return ec.SetExtension(DataContentEncodingKey, e) +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02.go new file mode 100644 index 000000000000..ed4affc388f2 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02.go @@ -0,0 +1,286 @@ +package cloudevents + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + + "github.com/cloudevents/sdk-go/pkg/cloudevents/types" +) + +const ( + // CloudEventsVersionV02 represents the version 0.2 of the CloudEvents spec. + CloudEventsVersionV02 = "0.2" +) + +// EventContextV02 represents the non-data attributes of a CloudEvents v0.2 +// event. +type EventContextV02 struct { + // The version of the CloudEvents specification used by the event. + SpecVersion string `json:"specversion"` + // The type of the occurrence which has happened. + Type string `json:"type"` + // A URI describing the event producer. + Source types.URLRef `json:"source"` + // ID of the event; must be non-empty and unique within the scope of the producer. + ID string `json:"id"` + // Timestamp when the event happened. + Time *types.Timestamp `json:"time,omitempty"` + // A link to the schema that the `data` attribute adheres to. + SchemaURL *types.URLRef `json:"schemaurl,omitempty"` + // A MIME (RFC2046) string describing the media type of `data`. 
+ // TODO: Should an empty string assume `application/json`, `application/octet-stream`, or auto-detect the content? + ContentType *string `json:"contenttype,omitempty"` + // Additional extension metadata beyond the base spec. + Extensions map[string]interface{} `json:"-"` +} + +// Adhere to EventContext +var _ EventContext = (*EventContextV02)(nil) + +// ExtensionAs implements EventContext.ExtensionAs +func (ec EventContextV02) ExtensionAs(name string, obj interface{}) error { + value, ok := ec.Extensions[name] + if !ok { + return fmt.Errorf("extension %q does not exist", name) + } + + // Try to unmarshal extension if we find it as a RawMessage. + switch v := value.(type) { + case json.RawMessage: + if err := json.Unmarshal(v, obj); err == nil { + // if that worked, return with obj set. + return nil + } + } + // else try as a string ptr. + + // Only support *string for now. + switch v := obj.(type) { + case *string: + if valueAsString, ok := value.(string); ok { + *v = valueAsString + return nil + } else { + return fmt.Errorf("invalid type for extension %q", name) + } + default: + return fmt.Errorf("unknown extension type %T", obj) + } +} + +// SetExtension adds the extension 'name' with value 'value' to the CloudEvents context. 
+func (ec *EventContextV02) SetExtension(name string, value interface{}) error { + if ec.Extensions == nil { + ec.Extensions = make(map[string]interface{}) + } + if value == nil { + delete(ec.Extensions, name) + } else { + ec.Extensions[name] = value + } + return nil +} + +// Clone implements EventContextConverter.Clone +func (ec EventContextV02) Clone() EventContext { + return ec.AsV02() +} + +// AsV01 implements EventContextConverter.AsV01 +func (ec EventContextV02) AsV01() *EventContextV01 { + ret := EventContextV01{ + CloudEventsVersion: CloudEventsVersionV01, + EventID: ec.ID, + EventTime: ec.Time, + EventType: ec.Type, + SchemaURL: ec.SchemaURL, + Source: ec.Source, + ContentType: ec.ContentType, + Extensions: make(map[string]interface{}), + } + + for k, v := range ec.Extensions { + // eventTypeVersion was retired in v0.2 + if strings.EqualFold(k, EventTypeVersionKey) { + etv, ok := v.(string) + if ok && etv != "" { + ret.EventTypeVersion = &etv + } + continue + } + ret.Extensions[k] = v + } + if len(ret.Extensions) == 0 { + ret.Extensions = nil + } + return &ret +} + +// AsV02 implements EventContextConverter.AsV02 +func (ec EventContextV02) AsV02() *EventContextV02 { + ec.SpecVersion = CloudEventsVersionV02 + return &ec +} + +// AsV03 implements EventContextConverter.AsV03 +func (ec EventContextV02) AsV03() *EventContextV03 { + ret := EventContextV03{ + SpecVersion: CloudEventsVersionV03, + ID: ec.ID, + Time: ec.Time, + Type: ec.Type, + SchemaURL: ec.SchemaURL, + DataContentType: ec.ContentType, + Source: ec.Source, + Extensions: make(map[string]interface{}), + } + + for k, v := range ec.Extensions { + // Subject was introduced in 0.3 + if strings.EqualFold(k, SubjectKey) { + sub, ok := v.(string) + if ok && sub != "" { + ret.Subject = &sub + } + continue + } + // DataContentEncoding was introduced in 0.3 + if strings.EqualFold(k, DataContentEncodingKey) { + etv, ok := v.(string) + if ok && etv != "" { + ret.DataContentEncoding = &etv + } + continue + } + 
ret.Extensions[k] = v + } + if len(ret.Extensions) == 0 { + ret.Extensions = nil + } + + return &ret +} + +// Validate returns errors based on requirements from the CloudEvents spec. +// For more details, see https://github.com/cloudevents/spec/blob/v0.2/spec.md +func (ec EventContextV02) Validate() error { + errors := []string(nil) + + // type + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // SHOULD be prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type. + eventType := strings.TrimSpace(ec.Type) + if eventType == "" { + errors = append(errors, "type: MUST be a non-empty string") + } + + // specversion + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + specVersion := strings.TrimSpace(ec.SpecVersion) + if specVersion == "" { + errors = append(errors, "specversion: MUST be a non-empty string") + } + + // source + // Type: URI-reference + // Constraints: + // REQUIRED + source := strings.TrimSpace(ec.Source.String()) + if source == "" { + errors = append(errors, "source: REQUIRED") + } + + // id + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // MUST be unique within the scope of the producer + id := strings.TrimSpace(ec.ID) + if id == "" { + errors = append(errors, "id: MUST be a non-empty string") + + // no way to test "MUST be unique within the scope of the producer" + } + + // time + // Type: Timestamp + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3339 + // --> no need to test this, no way to set the time without it being valid. + + // schemaurl + // Type: URI + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3986 + if ec.SchemaURL != nil { + schemaURL := strings.TrimSpace(ec.SchemaURL.String()) + // empty string is not RFC 3986 compatible. 
+ if schemaURL == "" { + errors = append(errors, "schemaurl: if present, MUST adhere to the format specified in RFC 3986") + } + } + + // contenttype + // Type: String per RFC 2046 + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 2046 + if ec.ContentType != nil { + contentType := strings.TrimSpace(*ec.ContentType) + if contentType == "" { + // TODO: need to test for RFC 2046 + errors = append(errors, "contenttype: if present, MUST adhere to the format specified in RFC 2046") + } + } + + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + return nil +} + +// String returns a pretty-printed representation of the EventContext. +func (ec EventContextV02) String() string { + b := strings.Builder{} + + b.WriteString("Context Attributes,\n") + + b.WriteString(" specversion: " + ec.SpecVersion + "\n") + b.WriteString(" type: " + ec.Type + "\n") + b.WriteString(" source: " + ec.Source.String() + "\n") + b.WriteString(" id: " + ec.ID + "\n") + if ec.Time != nil { + b.WriteString(" time: " + ec.Time.String() + "\n") + } + if ec.SchemaURL != nil { + b.WriteString(" schemaurl: " + ec.SchemaURL.String() + "\n") + } + if ec.ContentType != nil { + b.WriteString(" contenttype: " + *ec.ContentType + "\n") + } + + if ec.Extensions != nil && len(ec.Extensions) > 0 { + b.WriteString("Extensions,\n") + keys := make([]string, 0, len(ec.Extensions)) + for k := range ec.Extensions { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + b.WriteString(fmt.Sprintf(" %s: %v\n", key, ec.Extensions[key])) + } + } + + return b.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_reader.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_reader.go new file mode 100644 index 000000000000..224066e9829c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_reader.go @@ -0,0 +1,90 @@ +package cloudevents + 
+import ( + "mime" + "time" +) + +// Adhere to EventContextReader +var _ EventContextReader = (*EventContextV02)(nil) + +// GetSpecVersion implements EventContextReader.GetSpecVersion +func (ec EventContextV02) GetSpecVersion() string { + if ec.SpecVersion != "" { + return ec.SpecVersion + } + return CloudEventsVersionV02 +} + +// GetType implements EventContextReader.GetType +func (ec EventContextV02) GetType() string { + return ec.Type +} + +// GetSource implements EventContextReader.GetSource +func (ec EventContextV02) GetSource() string { + return ec.Source.String() +} + +// GetSubject implements EventContextReader.GetSubject +func (ec EventContextV02) GetSubject() string { + var sub string + if err := ec.ExtensionAs(SubjectKey, &sub); err != nil { + return "" + } + return sub +} + +// GetID implements EventContextReader.GetID +func (ec EventContextV02) GetID() string { + return ec.ID +} + +// GetTime implements EventContextReader.GetTime +func (ec EventContextV02) GetTime() time.Time { + if ec.Time != nil { + return ec.Time.Time + } + return time.Time{} +} + +// GetSchemaURL implements EventContextReader.GetSchemaURL +func (ec EventContextV02) GetSchemaURL() string { + if ec.SchemaURL != nil { + return ec.SchemaURL.String() + } + return "" +} + +// GetDataContentType implements EventContextReader.GetDataContentType +func (ec EventContextV02) GetDataContentType() string { + if ec.ContentType != nil { + return *ec.ContentType + } + return "" +} + +// GetDataMediaType implements EventContextReader.GetDataMediaType +func (ec EventContextV02) GetDataMediaType() (string, error) { + if ec.ContentType != nil { + mediaType, _, err := mime.ParseMediaType(*ec.ContentType) + if err != nil { + return "", err + } + return mediaType, nil + } + return "", nil +} + +// GetDataContentEncoding implements EventContextReader.GetDataContentEncoding +func (ec EventContextV02) GetDataContentEncoding() string { + var enc string + if err := ec.ExtensionAs(DataContentEncodingKey, &enc); 
err != nil { + return "" + } + return enc +} + +func (ec EventContextV02) GetExtensions() map[string]interface{} { + return ec.Extensions +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_writer.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_writer.go new file mode 100644 index 000000000000..8935e93d79c2 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_writer.go @@ -0,0 +1,104 @@ +package cloudevents + +import ( + "errors" + "fmt" + "net/url" + "strings" + "time" + + "github.com/cloudevents/sdk-go/pkg/cloudevents/types" +) + +// Adhere to EventContextWriter +var _ EventContextWriter = (*EventContextV02)(nil) + +// SetSpecVersion implements EventContextWriter.SetSpecVersion +func (ec *EventContextV02) SetSpecVersion(v string) error { + if v != CloudEventsVersionV02 { + return fmt.Errorf("invalid version %q, expecting %q", v, CloudEventsVersionV02) + } + ec.SpecVersion = CloudEventsVersionV02 + return nil +} + +// SetDataContentType implements EventContextWriter.SetDataContentType +func (ec *EventContextV02) SetDataContentType(ct string) error { + ct = strings.TrimSpace(ct) + if ct == "" { + ec.ContentType = nil + } else { + ec.ContentType = &ct + } + return nil +} + +// SetType implements EventContextWriter.SetType +func (ec *EventContextV02) SetType(t string) error { + t = strings.TrimSpace(t) + ec.Type = t + return nil +} + +// SetSource implements EventContextWriter.SetSource +func (ec *EventContextV02) SetSource(u string) error { + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.Source = types.URLRef{URL: *pu} + return nil +} + +// SetSubject implements EventContextWriter.SetSubject +func (ec *EventContextV02) SetSubject(s string) error { + s = strings.TrimSpace(s) + if s == "" { + return ec.SetExtension(SubjectKey, nil) + } + return ec.SetExtension(SubjectKey, s) +} + +// SetID implements EventContextWriter.SetID +func (ec *EventContextV02) 
SetID(id string) error { + id = strings.TrimSpace(id) + if id == "" { + return errors.New("id is required to be a non-empty string") + } + ec.ID = id + return nil +} + +// SetTime implements EventContextWriter.SetTime +func (ec *EventContextV02) SetTime(t time.Time) error { + if t.IsZero() { + ec.Time = nil + } else { + ec.Time = &types.Timestamp{Time: t} + } + return nil +} + +// SetSchemaURL implements EventContextWriter.SetSchemaURL +func (ec *EventContextV02) SetSchemaURL(u string) error { + u = strings.TrimSpace(u) + if u == "" { + ec.SchemaURL = nil + return nil + } + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.SchemaURL = &types.URLRef{URL: *pu} + return nil +} + +// SetDataContentEncoding implements EventContextWriter.SetDataContentEncoding +func (ec *EventContextV02) SetDataContentEncoding(e string) error { + e = strings.ToLower(strings.TrimSpace(e)) + if e == "" { + return ec.SetExtension(DataContentEncodingKey, nil) + } + return ec.SetExtension(DataContentEncodingKey, e) +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03.go new file mode 100644 index 000000000000..5f97c043e41a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03.go @@ -0,0 +1,296 @@ +package cloudevents + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + + "github.com/cloudevents/sdk-go/pkg/cloudevents/types" +) + +// WIP: AS OF FEB 19, 2019 + +const ( + // CloudEventsVersionV03 represents the version 0.3 of the CloudEvents spec. + CloudEventsVersionV03 = "0.3" +) + +// EventContextV03 represents the non-data attributes of a CloudEvents v0.3 +// event. +type EventContextV03 struct { + // SpecVersion - The version of the CloudEvents specification used by the event. + SpecVersion string `json:"specversion"` + // Type - The type of the occurrence which has happened. 
+ Type string `json:"type"` + // Source - A URI describing the event producer. + Source types.URLRef `json:"source"` + // Subject - The subject of the event in the context of the event producer + // (identified by `source`). + Subject *string `json:"subject,omitempty"` + // ID of the event; must be non-empty and unique within the scope of the producer. + ID string `json:"id"` + // Time - A Timestamp when the event happened. + Time *types.Timestamp `json:"time,omitempty"` + // SchemaURL - A link to the schema that the `data` attribute adheres to. + SchemaURL *types.URLRef `json:"schemaurl,omitempty"` + // GetDataMediaType - A MIME (RFC2046) string describing the media type of `data`. + // TODO: Should an empty string assume `application/json`, `application/octet-stream`, or auto-detect the content? + DataContentType *string `json:"datacontenttype,omitempty"` + // DataContentEncoding describes the content encoding for the `data` attribute. Valid: nil, `Base64`. + DataContentEncoding *string `json:"datacontentencoding,omitempty"` + // Extensions - Additional extension metadata beyond the base spec. + Extensions map[string]interface{} `json:"-"` +} + +// Adhere to EventContext +var _ EventContext = (*EventContextV03)(nil) + +// ExtensionAs implements EventContext.ExtensionAs +func (ec EventContextV03) ExtensionAs(name string, obj interface{}) error { + value, ok := ec.Extensions[name] + if !ok { + return fmt.Errorf("extension %q does not exist", name) + } + + // Try to unmarshal extension if we find it as a RawMessage. + switch v := value.(type) { + case json.RawMessage: + if err := json.Unmarshal(v, obj); err == nil { + // if that worked, return with obj set. + return nil + } + } + // else try as a string ptr. + + // Only support *string for now. 
+ switch v := obj.(type) { + case *string: + if valueAsString, ok := value.(string); ok { + *v = valueAsString + return nil + } else { + return fmt.Errorf("invalid type for extension %q", name) + } + default: + return fmt.Errorf("unknown extension type %T", obj) + } +} + +// SetExtension adds the extension 'name' with value 'value' to the CloudEvents context. +func (ec *EventContextV03) SetExtension(name string, value interface{}) error { + if ec.Extensions == nil { + ec.Extensions = make(map[string]interface{}) + } + if value == nil { + delete(ec.Extensions, name) + } else { + ec.Extensions[name] = value + } + return nil +} + +// Clone implements EventContextConverter.Clone +func (ec EventContextV03) Clone() EventContext { + return ec.AsV03() +} + +// AsV01 implements EventContextConverter.AsV01 +func (ec EventContextV03) AsV01() *EventContextV01 { + ecv2 := ec.AsV02() + return ecv2.AsV01() +} + +// AsV02 implements EventContextConverter.AsV02 +func (ec EventContextV03) AsV02() *EventContextV02 { + ret := EventContextV02{ + SpecVersion: CloudEventsVersionV02, + ID: ec.ID, + Time: ec.Time, + Type: ec.Type, + SchemaURL: ec.SchemaURL, + ContentType: ec.DataContentType, + Source: ec.Source, + Extensions: make(map[string]interface{}), + } + // Subject was introduced in 0.3, so put it in an extension for 0.2. + if ec.Subject != nil { + _ = ret.SetExtension(SubjectKey, *ec.Subject) + } + // DataContentEncoding was introduced in 0.3, so put it in an extension for 0.2. 
+ if ec.DataContentEncoding != nil { + _ = ret.SetExtension(DataContentEncodingKey, *ec.DataContentEncoding) + } + if ec.Extensions != nil { + for k, v := range ec.Extensions { + ret.Extensions[k] = v + } + } + if len(ret.Extensions) == 0 { + ret.Extensions = nil + } + return &ret +} + +// AsV03 implements EventContextConverter.AsV03 +func (ec EventContextV03) AsV03() *EventContextV03 { + ec.SpecVersion = CloudEventsVersionV03 + return &ec +} + +// Validate returns errors based on requirements from the CloudEvents spec. +// For more details, see https://github.com/cloudevents/spec/blob/master/spec.md +// As of Feb 26, 2019, commit 17c32ea26baf7714ad027d9917d03d2fff79fc7e +// + https://github.com/cloudevents/spec/pull/387 -> datacontentencoding +// + https://github.com/cloudevents/spec/pull/406 -> subject +func (ec EventContextV03) Validate() error { + errors := []string(nil) + + // type + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // SHOULD be prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type. 
+ eventType := strings.TrimSpace(ec.Type) + if eventType == "" { + errors = append(errors, "type: MUST be a non-empty string") + } + + // specversion + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + specVersion := strings.TrimSpace(ec.SpecVersion) + if specVersion == "" { + errors = append(errors, "specversion: MUST be a non-empty string") + } + + // source + // Type: URI-reference + // Constraints: + // REQUIRED + source := strings.TrimSpace(ec.Source.String()) + if source == "" { + errors = append(errors, "source: REQUIRED") + } + + // subject + // Type: String + // Constraints: + // OPTIONAL + // MUST be a non-empty string + if ec.Subject != nil { + subject := strings.TrimSpace(*ec.Subject) + if subject == "" { + errors = append(errors, "subject: if present, MUST be a non-empty string") + } + } + + // id + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // MUST be unique within the scope of the producer + id := strings.TrimSpace(ec.ID) + if id == "" { + errors = append(errors, "id: MUST be a non-empty string") + + // no way to test "MUST be unique within the scope of the producer" + } + + // time + // Type: Timestamp + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3339 + // --> no need to test this, no way to set the time without it being valid. + + // schemaurl + // Type: URI + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3986 + if ec.SchemaURL != nil { + schemaURL := strings.TrimSpace(ec.SchemaURL.String()) + // empty string is not RFC 3986 compatible. 
+ if schemaURL == "" { + errors = append(errors, "schemaurl: if present, MUST adhere to the format specified in RFC 3986") + } + } + + // datacontenttype + // Type: String per RFC 2046 + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 2046 + if ec.DataContentType != nil { + dataContentType := strings.TrimSpace(*ec.DataContentType) + if dataContentType == "" { + // TODO: need to test for RFC 2046 + errors = append(errors, "datacontenttype: if present, MUST adhere to the format specified in RFC 2046") + } + } + + // datacontentencoding + // Type: String per RFC 2045 Section 6.1 + // Constraints: + // The attribute MUST be set if the data attribute contains string-encoded binary data. + // Otherwise the attribute MUST NOT be set. + // If present, MUST adhere to RFC 2045 Section 6.1 + if ec.DataContentEncoding != nil { + dataContentEncoding := strings.ToLower(strings.TrimSpace(*ec.DataContentEncoding)) + if dataContentEncoding != Base64 { + // TODO: need to test for RFC 2046 + errors = append(errors, "datacontentencoding: if present, MUST adhere to RFC 2045 Section 6.1") + } + } + + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + return nil +} + +// String returns a pretty-printed representation of the EventContext. 
+func (ec EventContextV03) String() string { + b := strings.Builder{} + + b.WriteString("Context Attributes,\n") + + b.WriteString(" specversion: " + ec.SpecVersion + "\n") + b.WriteString(" type: " + ec.Type + "\n") + b.WriteString(" source: " + ec.Source.String() + "\n") + if ec.Subject != nil { + b.WriteString(" subject: " + *ec.Subject + "\n") + } + b.WriteString(" id: " + ec.ID + "\n") + if ec.Time != nil { + b.WriteString(" time: " + ec.Time.String() + "\n") + } + if ec.SchemaURL != nil { + b.WriteString(" schemaurl: " + ec.SchemaURL.String() + "\n") + } + if ec.DataContentType != nil { + b.WriteString(" datacontenttype: " + *ec.DataContentType + "\n") + } + if ec.DataContentEncoding != nil { + b.WriteString(" datacontentencoding: " + *ec.DataContentEncoding + "\n") + } + + if ec.Extensions != nil && len(ec.Extensions) > 0 { + b.WriteString("Extensions,\n") + keys := make([]string, 0, len(ec.Extensions)) + for k := range ec.Extensions { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + b.WriteString(fmt.Sprintf(" %s: %v\n", key, ec.Extensions[key])) + } + } + + return b.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_reader.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_reader.go new file mode 100644 index 000000000000..e4d9985085c0 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_reader.go @@ -0,0 +1,85 @@ +package cloudevents + +import ( + "mime" + "time" +) + +// GetSpecVersion implements EventContextReader.GetSpecVersion +func (ec EventContextV03) GetSpecVersion() string { + if ec.SpecVersion != "" { + return ec.SpecVersion + } + return CloudEventsVersionV03 +} + +// GetDataContentType implements EventContextReader.GetDataContentType +func (ec EventContextV03) GetDataContentType() string { + if ec.DataContentType != nil { + return *ec.DataContentType + } + return "" +} + +// GetDataMediaType implements 
EventContextReader.GetDataMediaType +func (ec EventContextV03) GetDataMediaType() (string, error) { + if ec.DataContentType != nil { + mediaType, _, err := mime.ParseMediaType(*ec.DataContentType) + if err != nil { + return "", err + } + return mediaType, nil + } + return "", nil +} + +// GetType implements EventContextReader.GetType +func (ec EventContextV03) GetType() string { + return ec.Type +} + +// GetSource implements EventContextReader.GetSource +func (ec EventContextV03) GetSource() string { + return ec.Source.String() +} + +// GetSubject implements EventContextReader.GetSubject +func (ec EventContextV03) GetSubject() string { + if ec.Subject != nil { + return *ec.Subject + } + return "" +} + +// GetTime implements EventContextReader.GetTime +func (ec EventContextV03) GetTime() time.Time { + if ec.Time != nil { + return ec.Time.Time + } + return time.Time{} +} + +// GetID implements EventContextReader.GetID +func (ec EventContextV03) GetID() string { + return ec.ID +} + +// GetSchemaURL implements EventContextReader.GetSchemaURL +func (ec EventContextV03) GetSchemaURL() string { + if ec.SchemaURL != nil { + return ec.SchemaURL.String() + } + return "" +} + +// GetDataContentEncoding implements EventContextReader.GetDataContentEncoding +func (ec EventContextV03) GetDataContentEncoding() string { + if ec.DataContentEncoding != nil { + return *ec.DataContentEncoding + } + return "" +} + +func (ec EventContextV03) GetExtensions() map[string]interface{} { + return ec.Extensions +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_writer.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_writer.go new file mode 100644 index 000000000000..9370d2a3d949 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_writer.go @@ -0,0 +1,108 @@ +package cloudevents + +import ( + "errors" + "fmt" + "net/url" + "strings" + "time" + + "github.com/cloudevents/sdk-go/pkg/cloudevents/types" 
+) + +// Adhere to EventContextWriter +var _ EventContextWriter = (*EventContextV03)(nil) + +// SetSpecVersion implements EventContextWriter.SetSpecVersion +func (ec *EventContextV03) SetSpecVersion(v string) error { + if v != CloudEventsVersionV03 { + return fmt.Errorf("invalid version %q, expecting %q", v, CloudEventsVersionV03) + } + ec.SpecVersion = CloudEventsVersionV03 + return nil +} + +// SetDataContentType implements EventContextWriter.SetDataContentType +func (ec *EventContextV03) SetDataContentType(ct string) error { + ct = strings.TrimSpace(ct) + if ct == "" { + ec.DataContentType = nil + } else { + ec.DataContentType = &ct + } + return nil +} + +// SetType implements EventContextWriter.SetType +func (ec *EventContextV03) SetType(t string) error { + t = strings.TrimSpace(t) + ec.Type = t + return nil +} + +// SetSource implements EventContextWriter.SetSource +func (ec *EventContextV03) SetSource(u string) error { + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.Source = types.URLRef{URL: *pu} + return nil +} + +// SetSubject implements EventContextWriter.SetSubject +func (ec *EventContextV03) SetSubject(s string) error { + s = strings.TrimSpace(s) + if s == "" { + ec.Subject = nil + } else { + ec.Subject = &s + } + return nil +} + +// SetID implements EventContextWriter.SetID +func (ec *EventContextV03) SetID(id string) error { + id = strings.TrimSpace(id) + if id == "" { + return errors.New("id is required to be a non-empty string") + } + ec.ID = id + return nil +} + +// SetTime implements EventContextWriter.SetTime +func (ec *EventContextV03) SetTime(t time.Time) error { + if t.IsZero() { + ec.Time = nil + } else { + ec.Time = &types.Timestamp{Time: t} + } + return nil +} + +// SetSchemaURL implements EventContextWriter.SetSchemaURL +func (ec *EventContextV03) SetSchemaURL(u string) error { + u = strings.TrimSpace(u) + if u == "" { + ec.SchemaURL = nil + return nil + } + pu, err := url.Parse(u) + if err != nil { + return err + } + 
ec.SchemaURL = &types.URLRef{URL: *pu} + return nil +} + +// SetDataContentEncoding implements EventContextWriter.SetDataContentEncoding +func (ec *EventContextV03) SetDataContentEncoding(e string) error { + e = strings.ToLower(strings.TrimSpace(e)) + if e == "" { + ec.DataContentEncoding = nil + } else { + ec.DataContentEncoding = &e + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/extensions.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/extensions.go new file mode 100644 index 000000000000..e33205fc8b5d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/extensions.go @@ -0,0 +1,13 @@ +package cloudevents + +const ( + // DataContentEncodingKey is the key to DataContentEncoding for versions that do not support data content encoding + // directly. + DataContentEncodingKey = "datacontentencoding" + + // EventTypeVersionKey is the key to EventTypeVersion for versions that do not support event type version directly. + EventTypeVersionKey = "eventTypeVersion" + + // SubjectKey is the key to Subject for versions that do not support subject directly. + SubjectKey = "subject" +) diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/doc.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/doc.go new file mode 100644 index 000000000000..3067ebe7e5d0 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/doc.go @@ -0,0 +1,4 @@ +/* +Package observability holds metrics and tracing recording implementations. 
+*/ +package observability diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/keys.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/keys.go new file mode 100644 index 000000000000..f032b10ecf73 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/keys.go @@ -0,0 +1,19 @@ +package observability + +import ( + "go.opencensus.io/tag" +) + +var ( + // KeyMethod is the tag used for marking method on a metric. + KeyMethod, _ = tag.NewKey("method") + // KeyResult is the tag used for marking result on a metric. + KeyResult, _ = tag.NewKey("result") +) + +const ( + // ResultError is a shared result tag value for error. + ResultError = "error" + // ResultOK is a shared result tag value for success. + ResultOK = "success" +) diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/observer.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/observer.go new file mode 100644 index 000000000000..76e7b12fda2e --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/observer.go @@ -0,0 +1,109 @@ +package observability + +import ( + "context" + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.opencensus.io/trace" +) + +// Observable represents the the customization used by the Reporter for a given +// measurement and trace for a single method. +type Observable interface { + TraceName() string + MethodName() string + LatencyMs() *stats.Float64Measure +} + +// Reporter represents a running latency counter and trace span. When Error or +// OK are called, the latency is calculated and the trace space is ended. Error +// or OK are only allowed to be called once. +type Reporter interface { + Error() + OK() +} + +type reporter struct { + ctx context.Context + span *trace.Span + on Observable + start time.Time + once sync.Once +} + +// All tags used for Latency measurements. 
+func LatencyTags() []tag.Key { + return []tag.Key{KeyMethod, KeyResult} +} + +var ( + // Tracing is disabled by default. It is very useful for profiling an + // application. + tracingEnabled = false +) + +// EnableTracing allows control over if tracing is enabled for the sdk. +// Default is false. This applies to all of the +// `github.com/cloudevents/sdk-go/...` package. +func EnableTracing(enabled bool) { + tracingEnabled = enabled +} + +// NewReporter creates and returns a reporter wrapping the provided Observable, +// and injects a trace span into the context. +func NewReporter(ctx context.Context, on Observable) (context.Context, Reporter) { + var span *trace.Span + if tracingEnabled { + ctx, span = trace.StartSpan(ctx, on.TraceName()) + } + r := &reporter{ + ctx: ctx, + on: on, + span: span, + start: time.Now(), + } + r.tagMethod() + return ctx, r +} + +func (r *reporter) tagMethod() { + var err error + r.ctx, err = tag.New(r.ctx, tag.Insert(KeyMethod, r.on.MethodName())) + if err != nil { + panic(err) // or ignore? + } +} + +func (r *reporter) record() { + ms := float64(time.Since(r.start) / time.Millisecond) + stats.Record(r.ctx, r.on.LatencyMs().M(ms)) + if r.span != nil { + r.span.End() + } +} + +// Error records the result as an error. +func (r *reporter) Error() { + r.once.Do(func() { + r.result(ResultError) + }) +} + +// OK records the result as a success. +func (r *reporter) OK() { + r.once.Do(func() { + r.result(ResultOK) + }) +} + +func (r *reporter) result(v string) { + var err error + r.ctx, err = tag.New(r.ctx, tag.Insert(KeyResult, v)) + if err != nil { + panic(err) // or ignore? 
+ } + r.record() +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/codec.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/codec.go new file mode 100644 index 000000000000..091064c9155b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/codec.go @@ -0,0 +1,35 @@ +package transport + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/pkg/cloudevents" +) + +// Codec is the interface for transport codecs to convert between transport +// specific payloads and the Message interface. +type Codec interface { + Encode(context.Context, cloudevents.Event) (Message, error) + Decode(context.Context, Message) (*cloudevents.Event, error) +} + +// ErrMessageEncodingUnknown is an error produced when the encoding for an incoming +// message can not be understood. +type ErrMessageEncodingUnknown struct { + codec string + transport string +} + +// NewErrMessageEncodingUnknown makes a new ErrMessageEncodingUnknown. +func NewErrMessageEncodingUnknown(codec, transport string) *ErrMessageEncodingUnknown { + return &ErrMessageEncodingUnknown{ + codec: codec, + transport: transport, + } +} + +// Error implements error.Error +func (e *ErrMessageEncodingUnknown) Error() string { + return fmt.Sprintf("message encoding unknown for %s codec on %s transport", e.codec, e.transport) +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/doc.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/doc.go new file mode 100644 index 000000000000..c2cbadde0d27 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/doc.go @@ -0,0 +1,12 @@ +/* + +Package transport defines interfaces to decouple the client package +from transport implementations. + +Most event sender and receiver applications should not use this +package, they should use the client package. 
This package is for +infrastructure developers implementing new transports, or intermediary +components like importers, channels or brokers. + +*/ +package transport diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/error.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/error.go new file mode 100644 index 000000000000..95e0f342e62c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/error.go @@ -0,0 +1,30 @@ +package transport + +import "fmt" + +// ErrTransportMessageConversion is an error produced when the transport +// message can not be converted. +type ErrTransportMessageConversion struct { + fatal bool + transport string + message string +} + +// NewErrMessageEncodingUnknown makes a new ErrMessageEncodingUnknown. +func NewErrTransportMessageConversion(transport, message string, fatal bool) *ErrTransportMessageConversion { + return &ErrTransportMessageConversion{ + transport: transport, + message: message, + fatal: fatal, + } +} + +// IsFatal reports if this error should be considered fatal. +func (e *ErrTransportMessageConversion) IsFatal() bool { + return e.fatal +} + +// Error implements error.Error +func (e *ErrTransportMessageConversion) Error() string { + return fmt.Sprintf("transport %s failed to convert message: %s", e.transport, e.message) +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec.go new file mode 100644 index 000000000000..6774111e52d5 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec.go @@ -0,0 +1,176 @@ +package http + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/pkg/cloudevents" + "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" +) + +// Codec is the wrapper for all versions of codecs supported by the http +// transport. 
+type Codec struct { + // Encoding is the setting to inform the DefaultEncodingSelectionFn for + // selecting a codec. + Encoding Encoding + + // DefaultEncodingSelectionFn allows for encoding selection strategies to be injected. + DefaultEncodingSelectionFn EncodingSelector + + v01 *CodecV01 + v02 *CodecV02 + v03 *CodecV03 +} + +// Adheres to Codec +var _ transport.Codec = (*Codec)(nil) + +// Encode encodes the provided event into a transport message. +func (c *Codec) Encode(ctx context.Context, e cloudevents.Event) (transport.Message, error) { + encoding := c.Encoding + + if encoding == Default && c.DefaultEncodingSelectionFn != nil { + encoding = c.DefaultEncodingSelectionFn(ctx, e) + } + + switch encoding { + case Default: + fallthrough + case BinaryV01: + fallthrough + case StructuredV01: + if c.v01 == nil { + c.v01 = &CodecV01{Encoding: encoding} + } + return c.v01.Encode(ctx, e) + case BinaryV02: + fallthrough + case StructuredV02: + if c.v02 == nil { + c.v02 = &CodecV02{Encoding: encoding} + } + return c.v02.Encode(ctx, e) + case BinaryV03: + fallthrough + case StructuredV03: + if c.v03 == nil { + c.v03 = &CodecV03{Encoding: encoding} + } + return c.v03.Encode(ctx, e) + default: + return nil, fmt.Errorf("unknown encoding: %s", encoding) + } +} + +// Decode converts a provided transport message into an Event, or error. 
+func (c *Codec) Decode(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { + switch c.inspectEncoding(ctx, msg) { + case BinaryV01, StructuredV01: + if c.v01 == nil { + c.v01 = &CodecV01{Encoding: c.Encoding} + } + if event, err := c.v01.Decode(ctx, msg); err != nil { + return nil, err + } else { + return c.convertEvent(event), nil + } + + case BinaryV02, StructuredV02: + if c.v02 == nil { + c.v02 = &CodecV02{Encoding: c.Encoding} + } + if event, err := c.v02.Decode(ctx, msg); err != nil { + return nil, err + } else { + return c.convertEvent(event), nil + } + + case BinaryV03, StructuredV03, BatchedV03: + if c.v03 == nil { + c.v03 = &CodecV03{Encoding: c.Encoding} + } + if event, err := c.v03.Decode(ctx, msg); err != nil { + return nil, err + } else { + return c.convertEvent(event), nil + } + default: + return nil, transport.NewErrMessageEncodingUnknown("wrapper", TransportName) + } +} + +// Give the context back as the user expects +func (c *Codec) convertEvent(event *cloudevents.Event) *cloudevents.Event { + if event == nil { + return nil + } + switch c.Encoding { + case Default: + return event + case BinaryV01: + fallthrough + case StructuredV01: + if c.v01 == nil { + c.v01 = &CodecV01{Encoding: c.Encoding} + } + ca := event.Context.AsV01() + event.Context = ca + return event + case BinaryV02: + fallthrough + case StructuredV02: + if c.v02 == nil { + c.v02 = &CodecV02{Encoding: c.Encoding} + } + ca := event.Context.AsV02() + event.Context = ca + return event + case BinaryV03: + fallthrough + case StructuredV03: + fallthrough + case BatchedV03: + if c.v03 == nil { + c.v03 = &CodecV03{Encoding: c.Encoding} + } + ca := event.Context.AsV03() + event.Context = ca + return event + default: + return nil + } +} + +func (c *Codec) inspectEncoding(ctx context.Context, msg transport.Message) Encoding { + // TODO: there should be a better way to make the version codecs on demand. 
+ if c.v01 == nil { + c.v01 = &CodecV01{Encoding: c.Encoding} + } + // Try v0.1 first. + encoding := c.v01.inspectEncoding(ctx, msg) + if encoding != Unknown { + return encoding + } + + if c.v02 == nil { + c.v02 = &CodecV02{Encoding: c.Encoding} + } + // Try v0.2. + encoding = c.v02.inspectEncoding(ctx, msg) + if encoding != Unknown { + return encoding + } + + if c.v03 == nil { + c.v03 = &CodecV03{Encoding: c.Encoding} + } + // Try v0.3. + encoding = c.v03.inspectEncoding(ctx, msg) + if encoding != Unknown { + return encoding + } + + // We do not understand the message encoding. + return Unknown +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_structured.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_structured.go new file mode 100644 index 000000000000..098cb5a15693 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_structured.go @@ -0,0 +1,44 @@ +package http + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "github.com/cloudevents/sdk-go/pkg/cloudevents" + "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" +) + +// CodecStructured represents an structured http transport codec for all versions. +// Intended to be used as a base class. 
+type CodecStructured struct { + Encoding Encoding +} + +func (v CodecStructured) encodeStructured(ctx context.Context, e cloudevents.Event) (transport.Message, error) { + header := http.Header{} + header.Set("Content-Type", cloudevents.ApplicationCloudEventsJSON) + + body, err := json.Marshal(e) + if err != nil { + return nil, err + } + + msg := &Message{ + Header: header, + Body: body, + } + + return msg, nil +} + +func (v CodecStructured) decodeStructured(ctx context.Context, version string, msg transport.Message) (*cloudevents.Event, error) { + m, ok := msg.(*Message) + if !ok { + return nil, fmt.Errorf("failed to convert transport.Message to http.Message") + } + event := cloudevents.New(version) + err := json.Unmarshal(m.Body, &event) + return &event, err +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v01.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v01.go new file mode 100644 index 000000000000..e414c090a4b2 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v01.go @@ -0,0 +1,221 @@ +package http + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/textproto" + "strings" + + "github.com/cloudevents/sdk-go/pkg/cloudevents" + "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" + "github.com/cloudevents/sdk-go/pkg/cloudevents/types" +) + +// CodecV01 represents a http transport codec that uses CloudEvents spec v0.3 +type CodecV01 struct { + CodecStructured + + Encoding Encoding +} + +// Adheres to Codec +var _ transport.Codec = (*CodecV01)(nil) + +// Encode implements Codec.Encode +func (v CodecV01) Encode(ctx context.Context, e cloudevents.Event) (transport.Message, error) { + // TODO: wire context + _, r := observability.NewReporter(context.Background(), CodecObserved{o: reportEncode, c: v.Encoding.Codec()}) + m, err := v.obsEncode(ctx, e) + if err != nil { 
+ r.Error() + } else { + r.OK() + } + return m, err +} + +func (v CodecV01) obsEncode(ctx context.Context, e cloudevents.Event) (transport.Message, error) { + switch v.Encoding { + case Default: + fallthrough + case BinaryV01: + return v.encodeBinary(ctx, e) + case StructuredV01: + return v.encodeStructured(ctx, e) + default: + return nil, fmt.Errorf("unknown encoding: %d", v.Encoding) + } +} + +// Decode implements Codec.Decode +func (v CodecV01) Decode(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { + // TODO: wire context + _, r := observability.NewReporter(ctx, CodecObserved{o: reportDecode, c: v.inspectEncoding(ctx, msg).Codec()}) // TODO: inspectEncoding is not free. + e, err := v.obsDecode(ctx, msg) + if err != nil { + r.Error() + } else { + r.OK() + } + return e, err +} + +func (v CodecV01) obsDecode(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { + switch v.inspectEncoding(ctx, msg) { + case BinaryV01: + return v.decodeBinary(ctx, msg) + case StructuredV01: + return v.decodeStructured(ctx, cloudevents.CloudEventsVersionV01, msg) + default: + return nil, transport.NewErrMessageEncodingUnknown("v01", TransportName) + } +} + +func (v CodecV01) encodeBinary(ctx context.Context, e cloudevents.Event) (transport.Message, error) { + header, err := v.toHeaders(e.Context.AsV01()) + if err != nil { + return nil, err + } + + body, err := e.DataBytes() + if err != nil { + panic("encode") + } + + msg := &Message{ + Header: header, + Body: body, + } + + return msg, nil +} + +func (v CodecV01) toHeaders(ec *cloudevents.EventContextV01) (http.Header, error) { + // Preserve case in v0.1, even though HTTP headers are case-insensitive. 
+ h := http.Header{} + h["CE-CloudEventsVersion"] = []string{ec.CloudEventsVersion} + h["CE-EventID"] = []string{ec.EventID} + h["CE-EventType"] = []string{ec.EventType} + h["CE-Source"] = []string{ec.Source.String()} + if ec.EventTime != nil && !ec.EventTime.IsZero() { + h["CE-EventTime"] = []string{ec.EventTime.String()} + } + if ec.EventTypeVersion != nil { + h["CE-EventTypeVersion"] = []string{*ec.EventTypeVersion} + } + if ec.SchemaURL != nil { + h["CE-SchemaURL"] = []string{ec.SchemaURL.String()} + } + if ec.ContentType != nil { + h.Set("Content-Type", *ec.ContentType) + } else if v.Encoding == Default || v.Encoding == BinaryV01 { + // in binary v0.1, the Content-Type header is tied to ec.ContentType + // This was later found to be an issue with the spec, but yolo. + // TODO: not sure what the default should be? + h.Set("Content-Type", cloudevents.ApplicationJSON) + } + + // Regarding Extensions, v0.1 Spec says the following: + // * Each map entry name MUST be prefixed with "CE-X-" + // * Each map entry name's first character MUST be capitalized + for k, v := range ec.Extensions { + encoded, err := json.Marshal(v) + if err != nil { + return nil, err + } + h["CE-X-"+strings.Title(k)] = []string{string(encoded)} + } + return h, nil +} + +func (v CodecV01) decodeBinary(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { + m, ok := msg.(*Message) + if !ok { + return nil, fmt.Errorf("failed to convert transport.Message to http.Message") + } + ca, err := v.fromHeaders(m.Header) + if err != nil { + return nil, err + } + var body interface{} + if len(m.Body) > 0 { + body = m.Body + } + return &cloudevents.Event{ + Context: &ca, + Data: body, + DataEncoded: body != nil, + }, nil +} + +func (v CodecV01) fromHeaders(h http.Header) (cloudevents.EventContextV01, error) { + // Normalize headers. 
+ for k, v := range h { + ck := textproto.CanonicalMIMEHeaderKey(k) + if k != ck { + h[ck] = v + } + } + + ec := cloudevents.EventContextV01{} + ec.CloudEventsVersion = h.Get("CE-CloudEventsVersion") + h.Del("CE-CloudEventsVersion") + ec.EventID = h.Get("CE-EventID") + h.Del("CE-EventID") + ec.EventType = h.Get("CE-EventType") + h.Del("CE-EventType") + source := types.ParseURLRef(h.Get("CE-Source")) + h.Del("CE-Source") + if source != nil { + ec.Source = *source + } + ec.EventTime = types.ParseTimestamp(h.Get("CE-EventTime")) + h.Del("CE-EventTime") + etv := h.Get("CE-EventTypeVersion") + h.Del("CE-EventTypeVersion") + if etv != "" { + ec.EventTypeVersion = &etv + } + ec.SchemaURL = types.ParseURLRef(h.Get("CE-SchemaURL")) + h.Del("CE-SchemaURL") + et := h.Get("Content-Type") + ec.ContentType = &et + + extensions := make(map[string]interface{}) + for k, v := range h { + if len(k) > len("CE-X-") && strings.EqualFold(k[:len("CE-X-")], "CE-X-") { + key := k[len("CE-X-"):] + var tmp interface{} + if err := json.Unmarshal([]byte(v[0]), &tmp); err == nil { + extensions[key] = tmp + } else { + // If we can't unmarshal the data, treat it as a string. 
+ extensions[key] = v[0] + } + h.Del(k) + } + } + if len(extensions) > 0 { + ec.Extensions = extensions + } + return ec, nil +} + +func (v CodecV01) inspectEncoding(ctx context.Context, msg transport.Message) Encoding { + version := msg.CloudEventsVersion() + if version != cloudevents.CloudEventsVersionV01 { + return Unknown + } + m, ok := msg.(*Message) + if !ok { + return Unknown + } + contentType := m.Header.Get("Content-Type") + if contentType == cloudevents.ApplicationCloudEventsJSON { + return StructuredV01 + } + return BinaryV01 +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v02.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v02.go new file mode 100644 index 000000000000..939e60204ed3 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v02.go @@ -0,0 +1,252 @@ +package http + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/textproto" + "strings" + + "github.com/cloudevents/sdk-go/pkg/cloudevents" + "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" + "github.com/cloudevents/sdk-go/pkg/cloudevents/types" +) + +// CodecV02 represents a http transport codec that uses CloudEvents spec v0.2 +type CodecV02 struct { + CodecStructured + + Encoding Encoding +} + +// Adheres to Codec +var _ transport.Codec = (*CodecV02)(nil) + +// Encode implements Codec.Encode +func (v CodecV02) Encode(ctx context.Context, e cloudevents.Event) (transport.Message, error) { + // TODO: wire context + _, r := observability.NewReporter(ctx, CodecObserved{o: reportEncode, c: v.Encoding.Codec()}) + m, err := v.obsEncode(ctx, e) + if err != nil { + r.Error() + } else { + r.OK() + } + return m, err +} + +func (v CodecV02) obsEncode(ctx context.Context, e cloudevents.Event) (transport.Message, error) { + switch v.Encoding { + case Default: + fallthrough + case BinaryV02: + return 
v.encodeBinary(ctx, e) + case StructuredV02: + return v.encodeStructured(ctx, e) + default: + return nil, fmt.Errorf("unknown encoding: %d", v.Encoding) + } +} + +// Decode implements Codec.Decode +func (v CodecV02) Decode(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { + // TODO: wire context + _, r := observability.NewReporter(ctx, CodecObserved{o: reportDecode, c: v.inspectEncoding(ctx, msg).Codec()}) // TODO: inspectEncoding is not free. + e, err := v.obsDecode(ctx, msg) + if err != nil { + r.Error() + } else { + r.OK() + } + return e, err +} + +func (v CodecV02) obsDecode(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { + switch v.inspectEncoding(ctx, msg) { + case BinaryV02: + return v.decodeBinary(ctx, msg) + case StructuredV02: + return v.decodeStructured(ctx, cloudevents.CloudEventsVersionV02, msg) + default: + return nil, transport.NewErrMessageEncodingUnknown("v02", TransportName) + } +} + +func (v CodecV02) encodeBinary(ctx context.Context, e cloudevents.Event) (transport.Message, error) { + header, err := v.toHeaders(e.Context.AsV02()) + if err != nil { + return nil, err + } + body, err := e.DataBytes() + if err != nil { + return nil, err + } + + msg := &Message{ + Header: header, + Body: body, + } + + return msg, nil +} + +func (v CodecV02) toHeaders(ec *cloudevents.EventContextV02) (http.Header, error) { + h := http.Header{} + h.Set("ce-specversion", ec.SpecVersion) + h.Set("ce-type", ec.Type) + h.Set("ce-source", ec.Source.String()) + h.Set("ce-id", ec.ID) + if ec.Time != nil && !ec.Time.IsZero() { + h.Set("ce-time", ec.Time.String()) + } + if ec.SchemaURL != nil { + h.Set("ce-schemaurl", ec.SchemaURL.String()) + } + if ec.ContentType != nil { + h.Set("Content-Type", *ec.ContentType) + } else if v.Encoding == Default || v.Encoding == BinaryV02 { + // in binary v0.2, the Content-Type header is tied to ec.ContentType + // This was later found to be an issue with the spec, but yolo. 
+ // TODO: not sure what the default should be? + h.Set("Content-Type", cloudevents.ApplicationJSON) + } + for k, v := range ec.Extensions { + // Per spec, map-valued extensions are converted to a list of headers as: + // CE-attrib-key + if mapVal, ok := v.(map[string]interface{}); ok { + for subkey, subval := range mapVal { + encoded, err := json.Marshal(subval) + if err != nil { + return nil, err + } + h.Set("ce-"+k+"-"+subkey, string(encoded)) + } + continue + } + encoded, err := json.Marshal(v) + if err != nil { + return nil, err + } + h.Set("ce-"+k, string(encoded)) + } + + return h, nil +} + +func (v CodecV02) decodeBinary(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { + m, ok := msg.(*Message) + if !ok { + return nil, fmt.Errorf("failed to convert transport.Message to http.Message") + } + ca, err := v.fromHeaders(m.Header) + if err != nil { + return nil, err + } + var body interface{} + if len(m.Body) > 0 { + body = m.Body + } + return &cloudevents.Event{ + Context: &ca, + Data: body, + DataEncoded: body != nil, + }, nil +} + +func (v CodecV02) fromHeaders(h http.Header) (cloudevents.EventContextV02, error) { + // Normalize headers. + for k, v := range h { + ck := textproto.CanonicalMIMEHeaderKey(k) + if k != ck { + delete(h, k) + h[ck] = v + } + } + + ec := cloudevents.EventContextV02{} + + ec.SpecVersion = h.Get("ce-specversion") + h.Del("ce-specversion") + + ec.ID = h.Get("ce-id") + h.Del("ce-id") + + ec.Type = h.Get("ce-type") + h.Del("ce-type") + + source := types.ParseURLRef(h.Get("ce-source")) + if source != nil { + ec.Source = *source + } + h.Del("ce-source") + + ec.Time = types.ParseTimestamp(h.Get("ce-time")) + h.Del("ce-time") + + ec.SchemaURL = types.ParseURLRef(h.Get("ce-schemaurl")) + h.Del("ce-schemaurl") + + contentType := h.Get("Content-Type") + if contentType != "" { + ec.ContentType = &contentType + } + h.Del("Content-Type") + + // At this point, we have deleted all the known headers. 
+ // Everything left is assumed to be an extension. + + extensions := make(map[string]interface{}) + for k, v := range h { + if len(k) > len("ce-") && strings.EqualFold(k[:len("ce-")], "ce-") { + ak := strings.ToLower(k[len("ce-"):]) + if i := strings.Index(ak, "-"); i > 0 { + // attrib-key + attrib := ak[:i] + key := ak[(i + 1):] + if xv, ok := extensions[attrib]; ok { + if m, ok := xv.(map[string]interface{}); ok { + m[key] = v + continue + } + // TODO: revisit how we want to bubble errors up. + return ec, fmt.Errorf("failed to process map type extension") + } else { + m := make(map[string]interface{}) + m[key] = v + extensions[attrib] = m + } + } else { + // key + var tmp interface{} + if err := json.Unmarshal([]byte(v[0]), &tmp); err == nil { + extensions[ak] = tmp + } else { + // If we can't unmarshal the data, treat it as a string. + extensions[ak] = v[0] + } + } + } + } + if len(extensions) > 0 { + ec.Extensions = extensions + } + return ec, nil +} + +func (v CodecV02) inspectEncoding(ctx context.Context, msg transport.Message) Encoding { + version := msg.CloudEventsVersion() + if version != cloudevents.CloudEventsVersionV02 { + return Unknown + } + m, ok := msg.(*Message) + if !ok { + return Unknown + } + contentType := m.Header.Get("Content-Type") + if contentType == cloudevents.ApplicationCloudEventsJSON { + return StructuredV02 + } + return BinaryV02 +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v03.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v03.go new file mode 100644 index 000000000000..b1236bf81f41 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v03.go @@ -0,0 +1,291 @@ +package http + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/textproto" + "strings" + + "github.com/cloudevents/sdk-go/pkg/cloudevents" + "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + 
"github.com/cloudevents/sdk-go/pkg/cloudevents/transport" + "github.com/cloudevents/sdk-go/pkg/cloudevents/types" +) + +// CodecV03 represents a http transport codec that uses CloudEvents spec v0.3 +type CodecV03 struct { + CodecStructured + + Encoding Encoding +} + +// Adheres to Codec +var _ transport.Codec = (*CodecV03)(nil) + +// Encode implements Codec.Encode +func (v CodecV03) Encode(ctx context.Context, e cloudevents.Event) (transport.Message, error) { + // TODO: wire context + _, r := observability.NewReporter(ctx, CodecObserved{o: reportEncode, c: v.Encoding.Codec()}) + m, err := v.obsEncode(ctx, e) + if err != nil { + r.Error() + } else { + r.OK() + } + return m, err +} + +func (v CodecV03) obsEncode(ctx context.Context, e cloudevents.Event) (transport.Message, error) { + switch v.Encoding { + case Default: + fallthrough + case BinaryV03: + return v.encodeBinary(ctx, e) + case StructuredV03: + return v.encodeStructured(ctx, e) + case BatchedV03: + return nil, fmt.Errorf("not implemented") + default: + return nil, fmt.Errorf("unknown encoding: %d", v.Encoding) + } +} + +// Decode implements Codec.Decode +func (v CodecV03) Decode(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { + // TODO: wire context + _, r := observability.NewReporter(ctx, CodecObserved{o: reportDecode, c: v.inspectEncoding(ctx, msg).Codec()}) // TODO: inspectEncoding is not free. 
+ e, err := v.obsDecode(ctx, msg) + if err != nil { + r.Error() + } else { + r.OK() + } + return e, err +} + +func (v CodecV03) obsDecode(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { + switch v.inspectEncoding(ctx, msg) { + case BinaryV03: + return v.decodeBinary(ctx, msg) + case StructuredV03: + return v.decodeStructured(ctx, cloudevents.CloudEventsVersionV03, msg) + case BatchedV03: + return nil, fmt.Errorf("not implemented") + default: + return nil, transport.NewErrMessageEncodingUnknown("v03", TransportName) + } +} + +func (v CodecV03) encodeBinary(ctx context.Context, e cloudevents.Event) (transport.Message, error) { + header, err := v.toHeaders(e.Context.AsV03()) + if err != nil { + return nil, err + } + + body, err := e.DataBytes() + if err != nil { + return nil, err + } + + msg := &Message{ + Header: header, + Body: body, + } + + return msg, nil +} + +func (v CodecV03) toHeaders(ec *cloudevents.EventContextV03) (http.Header, error) { + h := http.Header{} + h.Set("ce-specversion", ec.SpecVersion) + h.Set("ce-type", ec.Type) + h.Set("ce-source", ec.Source.String()) + if ec.Subject != nil { + h.Set("ce-subject", *ec.Subject) + } + h.Set("ce-id", ec.ID) + if ec.Time != nil && !ec.Time.IsZero() { + h.Set("ce-time", ec.Time.String()) + } + if ec.SchemaURL != nil { + h.Set("ce-schemaurl", ec.SchemaURL.String()) + } + if ec.DataContentType != nil { + h.Set("Content-Type", *ec.DataContentType) + } else if v.Encoding == Default || v.Encoding == BinaryV03 { + // in binary v0.2, the Content-Type header is tied to ec.ContentType + // This was later found to be an issue with the spec, but yolo. + // TODO: not sure what the default should be? 
+ h.Set("Content-Type", cloudevents.ApplicationJSON) + } + if ec.DataContentEncoding != nil { + h.Set("ce-datacontentencoding", *ec.DataContentEncoding) + } + + for k, v := range ec.Extensions { + // Per spec, map-valued extensions are converted to a list of headers as: + // CE-attrib-key + switch v.(type) { + case string: + h.Set("ce-"+k, v.(string)) + + case map[string]interface{}: + mapVal := v.(map[string]interface{}) + + for subkey, subval := range mapVal { + if subvalstr, ok := v.(string); ok { + h.Set("ce-"+k+"-"+subkey, subvalstr) + continue + } + + encoded, err := json.Marshal(subval) + if err != nil { + return nil, err + } + h.Set("ce-"+k+"-"+subkey, string(encoded)) + } + + default: + encoded, err := json.Marshal(v) + if err != nil { + return nil, err + } + h.Set("ce-"+k, string(encoded)) + } + } + + return h, nil +} + +func (v CodecV03) decodeBinary(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { + m, ok := msg.(*Message) + if !ok { + return nil, fmt.Errorf("failed to convert transport.Message to http.Message") + } + ca, err := v.fromHeaders(m.Header) + if err != nil { + return nil, err + } + var body interface{} + if len(m.Body) > 0 { + body = m.Body + } + return &cloudevents.Event{ + Context: &ca, + Data: body, + DataEncoded: body != nil, + }, nil +} + +func (v CodecV03) fromHeaders(h http.Header) (cloudevents.EventContextV03, error) { + // Normalize headers. 
+ for k, v := range h { + ck := textproto.CanonicalMIMEHeaderKey(k) + if k != ck { + delete(h, k) + h[ck] = v + } + } + + ec := cloudevents.EventContextV03{} + + ec.SpecVersion = h.Get("ce-specversion") + h.Del("ce-specversion") + + ec.ID = h.Get("ce-id") + h.Del("ce-id") + + ec.Type = h.Get("ce-type") + h.Del("ce-type") + + source := types.ParseURLRef(h.Get("ce-source")) + if source != nil { + ec.Source = *source + } + h.Del("ce-source") + + subject := h.Get("ce-subject") + if subject != "" { + ec.Subject = &subject + } + h.Del("ce-subject") + + ec.Time = types.ParseTimestamp(h.Get("ce-time")) + h.Del("ce-time") + + ec.SchemaURL = types.ParseURLRef(h.Get("ce-schemaurl")) + h.Del("ce-schemaurl") + + contentType := h.Get("Content-Type") + if contentType != "" { + ec.DataContentType = &contentType + } + h.Del("Content-Type") + + dataContentEncoding := h.Get("ce-datacontentencoding") + if dataContentEncoding != "" { + ec.DataContentEncoding = &dataContentEncoding + } + h.Del("ce-datacontentencoding") + + // At this point, we have deleted all the known headers. + // Everything left is assumed to be an extension. + + extensions := make(map[string]interface{}) + for k, v := range h { + if len(k) > len("ce-") && strings.EqualFold(k[:len("ce-")], "ce-") { + ak := strings.ToLower(k[len("ce-"):]) + if i := strings.Index(ak, "-"); i > 0 { + // attrib-key + attrib := ak[:i] + key := ak[(i + 1):] + if xv, ok := extensions[attrib]; ok { + if m, ok := xv.(map[string]interface{}); ok { + m[key] = v + continue + } + // TODO: revisit how we want to bubble errors up. + return ec, fmt.Errorf("failed to process map type extension") + } else { + m := make(map[string]interface{}) + m[key] = v + extensions[attrib] = m + } + } else { + // key + var tmp interface{} + if err := json.Unmarshal([]byte(v[0]), &tmp); err == nil { + extensions[ak] = tmp + } else { + // If we can't unmarshal the data, treat it as a string. 
+ extensions[ak] = v[0] + } + } + } + } + if len(extensions) > 0 { + ec.Extensions = extensions + } + return ec, nil +} + +func (v CodecV03) inspectEncoding(ctx context.Context, msg transport.Message) Encoding { + version := msg.CloudEventsVersion() + if version != cloudevents.CloudEventsVersionV03 { + return Unknown + } + m, ok := msg.(*Message) + if !ok { + return Unknown + } + contentType := m.Header.Get("Content-Type") + if contentType == cloudevents.ApplicationCloudEventsJSON { + return StructuredV03 + } + if contentType == cloudevents.ApplicationCloudEventsBatchJSON { + return BatchedV03 + } + return BinaryV03 +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/context.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/context.go new file mode 100644 index 000000000000..cf8b8510d7a4 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/context.go @@ -0,0 +1,207 @@ +package http + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" +) + +// TransportContext allows a Receiver to understand the context of a request. +type TransportContext struct { + URI string + Host string + Method string + Header http.Header + StatusCode int + + // IgnoreHeaderPrefixes controls what comes back from AttendToHeaders. + // AttendToHeaders controls what is output for .String() + IgnoreHeaderPrefixes []string +} + +// NewTransportContext creates a new TransportContext from a http.Request. +func NewTransportContext(req *http.Request) TransportContext { + var tx *TransportContext + if req != nil { + tx = &TransportContext{ + URI: req.RequestURI, + Host: req.Host, + Method: req.Method, + Header: req.Header, + } + } else { + tx = &TransportContext{} + } + tx.AddIgnoreHeaderPrefix("accept-encoding", "user-agent", "connection", "content-type") + return *tx +} + +// NewTransportContextFromResponse creates a new TransportContext from a http.Response. 
+// If `res` is nil, it returns a context with a http.StatusInternalServerError status code. +func NewTransportContextFromResponse(res *http.Response) TransportContext { + var tx *TransportContext + if res != nil { + tx = &TransportContext{ + Header: res.Header, + StatusCode: res.StatusCode, + } + } else { + tx = &TransportContext{StatusCode: http.StatusInternalServerError} + } + tx.AddIgnoreHeaderPrefix("accept-encoding", "user-agent", "connection", "content-type") + return *tx +} + +// TransportResponseContext allows a Receiver response with http transport specific fields. +type TransportResponseContext struct { + // Header will be merged with the response headers. + Header http.Header +} + +// AttendToHeaders returns the list of headers that exist in the TransportContext that are not currently in +// tx.IgnoreHeaderPrefix. +func (tx TransportContext) AttendToHeaders() []string { + a := []string(nil) + if tx.Header != nil && len(tx.Header) > 0 { + for k := range tx.Header { + if tx.shouldIgnoreHeader(k) { + continue + } + a = append(a, k) + } + } + return a +} + +func (tx TransportContext) shouldIgnoreHeader(h string) bool { + for _, v := range tx.IgnoreHeaderPrefixes { + if strings.HasPrefix(strings.ToLower(h), strings.ToLower(v)) { + return true + } + } + return false +} + +// String generates a pretty-printed version of the resource as a string. 
+func (tx TransportContext) String() string { + b := strings.Builder{} + + b.WriteString("Transport Context,\n") + + empty := b.Len() + + if tx.URI != "" { + b.WriteString(" URI: " + tx.URI + "\n") + } + if tx.Host != "" { + b.WriteString(" Host: " + tx.Host + "\n") + } + + if tx.Method != "" { + b.WriteString(" Method: " + tx.Method + "\n") + } + + if tx.StatusCode != 0 { + b.WriteString(" StatusCode: " + strconv.Itoa(tx.StatusCode) + "\n") + } + + if tx.Header != nil && len(tx.Header) > 0 { + b.WriteString(" Header:\n") + for _, k := range tx.AttendToHeaders() { + b.WriteString(fmt.Sprintf(" %s: %s\n", k, tx.Header.Get(k))) + } + } + + if b.Len() == empty { + b.WriteString(" nil\n") + } + + return b.String() +} + +// AddIgnoreHeaderPrefix controls what header key is to be attended to and/or printed. +func (tx *TransportContext) AddIgnoreHeaderPrefix(prefix ...string) { + if tx.IgnoreHeaderPrefixes == nil { + tx.IgnoreHeaderPrefixes = []string(nil) + } + tx.IgnoreHeaderPrefixes = append(tx.IgnoreHeaderPrefixes, prefix...) +} + +// Opaque key type used to store TransportContext +type transportContextKeyType struct{} + +var transportContextKey = transportContextKeyType{} + +// WithTransportContext return a context with the given TransportContext into the provided context object. +func WithTransportContext(ctx context.Context, tcxt TransportContext) context.Context { + return context.WithValue(ctx, transportContextKey, tcxt) +} + +// TransportContextFrom pulls a TransportContext out of a context. Always +// returns a non-nil object. 
+func TransportContextFrom(ctx context.Context) TransportContext { + tctx := ctx.Value(transportContextKey) + if tctx != nil { + if tx, ok := tctx.(TransportContext); ok { + return tx + } + if tx, ok := tctx.(*TransportContext); ok { + return *tx + } + } + return TransportContext{} +} + +// Opaque key type used to store Headers +type headerKeyType struct{} + +var headerKey = headerKeyType{} + +// ContextWithHeader returns a context with a header added to the given context. +// Can be called multiple times to set multiple header key/value pairs. +func ContextWithHeader(ctx context.Context, key, value string) context.Context { + header := HeaderFrom(ctx) + header.Add(key, value) + return context.WithValue(ctx, headerKey, header) +} + +// HeaderFrom extracts the header object in the given context. Always returns a non-nil Header. +func HeaderFrom(ctx context.Context) http.Header { + ch := http.Header{} + header := ctx.Value(headerKey) + if header != nil { + if h, ok := header.(http.Header); ok { + copyHeaders(h, ch) + } + } + return ch +} + +// Opaque key type used to store long poll target. +type longPollTargetKeyType struct{} + +var longPollTargetKey = longPollTargetKeyType{} + +// WithLongPollTarget returns a new context with the given long poll target. +// `target` should be a full URL and will be injected into the long polling +// http request within StartReceiver. +func ContextWithLongPollTarget(ctx context.Context, target string) context.Context { + return context.WithValue(ctx, longPollTargetKey, target) +} + +// LongPollTargetFrom looks in the given context and returns `target` as a +// parsed url if found and valid, otherwise nil. 
+func LongPollTargetFrom(ctx context.Context) *url.URL { + c := ctx.Value(longPollTargetKey) + if c != nil { + if s, ok := c.(string); ok && s != "" { + if target, err := url.Parse(s); err == nil { + return target + } + } + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/doc.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/doc.go new file mode 100644 index 000000000000..1a171e46e1ed --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/doc.go @@ -0,0 +1,4 @@ +/* +Package http implements the CloudEvent transport implementation using HTTP. +*/ +package http diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/encoding.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/encoding.go new file mode 100644 index 000000000000..a0d80c49485f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/encoding.go @@ -0,0 +1,178 @@ +package http + +import ( + "context" + + "github.com/cloudevents/sdk-go/pkg/cloudevents" + cecontext "github.com/cloudevents/sdk-go/pkg/cloudevents/context" +) + +// Encoding to use for HTTP transport. +type Encoding int32 + +type EncodingSelector func(context.Context, cloudevents.Event) Encoding + +const ( + // Default + Default Encoding = iota + // BinaryV01 is Binary CloudEvents spec v0.1. + BinaryV01 + // StructuredV01 is Structured CloudEvents spec v0.1. + StructuredV01 + // BinaryV02 is Binary CloudEvents spec v0.2. + BinaryV02 + // StructuredV02 is Structured CloudEvents spec v0.2. + StructuredV02 + // BinaryV03 is Binary CloudEvents spec v0.3. + BinaryV03 + // StructuredV03 is Structured CloudEvents spec v0.3. + StructuredV03 + // BatchedV03 is Batched CloudEvents spec v0.3. + BatchedV03 + // Unknown is unknown. 
+ Unknown + + // Binary is used for Context Based Encoding Selections to use the + // DefaultBinaryEncodingSelectionStrategy + Binary = "binary" + + // Structured is used for Context Based Encoding Selections to use the + // DefaultStructuredEncodingSelectionStrategy + Structured = "structured" +) + +func ContextBasedEncodingSelectionStrategy(ctx context.Context, e cloudevents.Event) Encoding { + encoding := cecontext.EncodingFrom(ctx) + switch encoding { + case "", Binary: + return DefaultBinaryEncodingSelectionStrategy(ctx, e) + case Structured: + return DefaultStructuredEncodingSelectionStrategy(ctx, e) + } + return Default +} + +// DefaultBinaryEncodingSelectionStrategy implements a selection process for +// which binary encoding to use based on spec version of the event. +func DefaultBinaryEncodingSelectionStrategy(ctx context.Context, e cloudevents.Event) Encoding { + switch e.SpecVersion() { + case cloudevents.CloudEventsVersionV01: + return BinaryV01 + case cloudevents.CloudEventsVersionV02: + return BinaryV02 + case cloudevents.CloudEventsVersionV03: + return BinaryV03 + } + // Unknown version, return Default. + return Default +} + +// DefaultStructuredEncodingSelectionStrategy implements a selection process +// for which structured encoding to use based on spec version of the event. +func DefaultStructuredEncodingSelectionStrategy(ctx context.Context, e cloudevents.Event) Encoding { + switch e.SpecVersion() { + case cloudevents.CloudEventsVersionV01: + return StructuredV01 + case cloudevents.CloudEventsVersionV02: + return StructuredV02 + case cloudevents.CloudEventsVersionV03: + return StructuredV03 + } + // Unknown version, return Default. + return Default +} + +// String pretty-prints the encoding as a string. 
+func (e Encoding) String() string { + switch e { + case Default: + return "Default Encoding " + e.Version() + + // Binary + case BinaryV01: + fallthrough + case BinaryV02: + fallthrough + case BinaryV03: + return "Binary Encoding " + e.Version() + + // Structured + case StructuredV01: + fallthrough + case StructuredV02: + fallthrough + case StructuredV03: + return "Structured Encoding " + e.Version() + + // Batched + case BatchedV03: + return "Batched Encoding " + e.Version() + + default: + return "Unknown Encoding" + } +} + +// Version pretty-prints the encoding version as a string. +func (e Encoding) Version() string { + switch e { + case Default: + return "Default" + + // Version 0.1 + case BinaryV01: + fallthrough + case StructuredV01: + return "v0.1" + + // Version 0.2 + case BinaryV02: + fallthrough + case StructuredV02: + return "v0.2" + + // Version 0.3 + case BinaryV03: + fallthrough + case StructuredV03: + fallthrough + case BatchedV03: + return "v0.3" + + // Unknown + default: + return "Unknown" + } +} + +// Codec creates a structured string to represent the the codec version. 
+func (e Encoding) Codec() string { + switch e { + case Default: + return "default" + + // Version 0.1 + case BinaryV01: + return "binary/v0.1" + case StructuredV01: + return "structured/v0.1" + + // Version 0.2 + case BinaryV02: + return "binary/v0.2" + case StructuredV02: + return "structured/v0.2" + + // Version 0.3 + case BinaryV03: + return "binary/v0.3" + case StructuredV03: + return "structured/v0.3" + case BatchedV03: + return "batched/v0.3" + + // Unknown + default: + return "unknown" + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/message.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/message.go new file mode 100644 index 000000000000..a6cdbecb1c6b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/message.go @@ -0,0 +1,148 @@ +package http + +import ( + "bytes" + "encoding/json" + + "io" + "io/ioutil" + "net/http" + + "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" +) + +// type check that this transport message impl matches the contract +var _ transport.Message = (*Message)(nil) + +// Message is an http transport message. +type Message struct { + Header http.Header + Body []byte +} + +// Response is an http transport response. +type Response struct { + StatusCode int + Message +} + +// CloudEventsVersion inspects a message and tries to discover and return the +// CloudEvents spec version. +func (m Message) CloudEventsVersion() string { + + // TODO: the impl of this method needs to move into the codec. + + if m.Header != nil { + // Try headers first. + // v0.1, cased from the spec + // Note: don't pass literal string direct to m.Header[] so that + // go vet won't complain about non-canonical case. 
+	name := "CE-CloudEventsVersion"
+	if v := m.Header[name]; len(v) == 1 {
+		return v[0]
+	}
+	// v0.1, canonical casing (CE-CloudEventsVersion is the v0.1 spec header)
+	if ver := m.Header.Get("CE-CloudEventsVersion"); ver != "" {
+		return ver
+	}
+
+	// v0.2, cased from the spec
+	name = "ce-specversion"
+	if v := m.Header[name]; len(v) == 1 {
+		return v[0]
+	}
+	// v0.2, canonical casing
+	name = "ce-specversion"
+	if ver := m.Header.Get(name); ver != "" {
+		return ver
+	}
+	}
+
+	// Then try the data body.
+	// TODO: we need to use the correct decoding based on content type.
+
+	raw := make(map[string]json.RawMessage)
+	if err := json.Unmarshal(m.Body, &raw); err != nil {
+		return ""
+	}
+
+	// v0.1
+	if v, ok := raw["cloudEventsVersion"]; ok {
+		var version string
+		if err := json.Unmarshal(v, &version); err != nil {
+			return ""
+		}
+		return version
+	}
+
+	// v0.2
+	if v, ok := raw["specversion"]; ok {
+		var version string
+		if err := json.Unmarshal(v, &version); err != nil {
+			return ""
+		}
+		return version
+	}
+
+	return ""
+}
+
+// readAllClose drains r to a byte slice and closes it; a nil reader
+// yields (nil, nil).
+func readAllClose(r io.ReadCloser) ([]byte, error) {
+	if r != nil {
+		defer r.Close()
+		return ioutil.ReadAll(r)
+	}
+	return nil, nil
+}
+
+// NewMessage creates a new message from the Header and Body of
+// an http.Request or http.Response
+func NewMessage(header http.Header, body io.ReadCloser) (*Message, error) {
+	var m Message
+	err := m.Init(header, body)
+	return &m, err
+}
+
+// NewResponse creates a new response from the Header and Body of
+// an http.Request or http.Response
+func NewResponse(header http.Header, body io.ReadCloser, statusCode int) (*Response, error) {
+	resp := Response{StatusCode: statusCode}
+	err := resp.Init(header, body)
+	return &resp, err
+}
+
+// Copy copies a new Body and Header into a message, replacing any previous data.
+func (m *Message) Init(header http.Header, body io.ReadCloser) error { + m.Header = make(http.Header, len(header)) + copyHeadersEnsure(header, &m.Header) + var err error + m.Body, err = readAllClose(body) + return err +} + +func (m *Message) copyOut(header *http.Header, body *io.ReadCloser) { + copyHeadersEnsure(m.Header, header) + *body = nil + if m.Body != nil { + copy := append([]byte(nil), m.Body...) + *body = ioutil.NopCloser(bytes.NewBuffer(copy)) + } +} + +// ToRequest updates a http.Request from a Message. +// Replaces Body, ContentLength and Method, updates Headers. +// Panic if req is nil +func (m *Message) ToRequest(req *http.Request) { + m.copyOut(&req.Header, &req.Body) + req.ContentLength = int64(len(m.Body)) + req.Method = http.MethodPost +} + +// ToResponse updates a http.Response from a Response. +// Replaces Body, updates Headers. +// Panic if resp is nil +func (m *Response) ToResponse(resp *http.Response) { + m.copyOut(&resp.Header, &resp.Body) + resp.ContentLength = int64(len(m.Body)) + resp.StatusCode = m.StatusCode +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/observability.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/observability.go new file mode 100644 index 000000000000..1da56dc2ad5d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/observability.go @@ -0,0 +1,109 @@ +package http + +import ( + "fmt" + + "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" +) + +var ( + // LatencyMs measures the latency in milliseconds for the http transport + // methods for CloudEvents. + LatencyMs = stats.Float64( + "cloudevents.io/sdk-go/transport/http/latency", + "The latency in milliseconds for the http transport methods for CloudEvents.", + "ms") +) + +var ( + // LatencyView is an OpenCensus view that shows http transport method latency. 
+ LatencyView = &view.View{ + Name: "transport/http/latency", + Measure: LatencyMs, + Description: "The distribution of latency inside of http transport for CloudEvents.", + Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), + TagKeys: observability.LatencyTags(), + } +) + +type observed int32 + +// Adheres to Observable +var _ observability.Observable = observed(0) + +const ( + reportSend observed = iota + reportReceive + reportServeHTTP + reportEncode + reportDecode +) + +// TraceName implements Observable.TraceName +func (o observed) TraceName() string { + switch o { + case reportSend: + return "transport/http/send" + case reportReceive: + return "transport/http/receive" + case reportServeHTTP: + return "transport/http/servehttp" + case reportEncode: + return "transport/http/encode" + case reportDecode: + return "transport/http/decode" + default: + return "transport/http/unknown" + } +} + +// MethodName implements Observable.MethodName +func (o observed) MethodName() string { + switch o { + case reportSend: + return "send" + case reportReceive: + return "receive" + case reportServeHTTP: + return "servehttp" + case reportEncode: + return "encode" + case reportDecode: + return "decode" + default: + return "unknown" + } +} + +// LatencyMs implements Observable.LatencyMs +func (o observed) LatencyMs() *stats.Float64Measure { + return LatencyMs +} + +// CodecObserved is a wrapper to append version to observed. 
+type CodecObserved struct { + // Method + o observed + // Codec + c string +} + +// Adheres to Observable +var _ observability.Observable = (*CodecObserved)(nil) + +// TraceName implements Observable.TraceName +func (c CodecObserved) TraceName() string { + return fmt.Sprintf("%s/%s", c.o.TraceName(), c.c) +} + +// MethodName implements Observable.MethodName +func (c CodecObserved) MethodName() string { + return fmt.Sprintf("%s/%s", c.o.MethodName(), c.c) +} + +// LatencyMs implements Observable.LatencyMs +func (c CodecObserved) LatencyMs() *stats.Float64Measure { + return c.o.LatencyMs() +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/options.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/options.go new file mode 100644 index 000000000000..c6e0c20df3b2 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/options.go @@ -0,0 +1,266 @@ +package http + +import ( + "fmt" + "net" + nethttp "net/http" + "net/url" + "strings" + "time" +) + +// Option is the function signature required to be considered an http.Option. +type Option func(*Transport) error + +// WithTarget sets the outbound recipient of cloudevents when using an HTTP +// request. +func WithTarget(targetUrl string) Option { + return func(t *Transport) error { + if t == nil { + return fmt.Errorf("http target option can not set nil transport") + } + targetUrl = strings.TrimSpace(targetUrl) + if targetUrl != "" { + var err error + var target *url.URL + target, err = url.Parse(targetUrl) + if err != nil { + return fmt.Errorf("http target option failed to parse target url: %s", err.Error()) + } + + if t.Req == nil { + t.Req = &nethttp.Request{ + Method: nethttp.MethodPost, + } + } + t.Req.URL = target + return nil + } + return fmt.Errorf("http target option was empty string") + } +} + +// WithMethod sets the outbound recipient of cloudevents when using an HTTP +// request. 
+func WithMethod(method string) Option { + return func(t *Transport) error { + if t == nil { + return fmt.Errorf("http method option can not set nil transport") + } + method = strings.TrimSpace(method) + if method != "" { + if t.Req == nil { + t.Req = &nethttp.Request{} + } + t.Req.Method = method + return nil + } + return fmt.Errorf("http method option was empty string") + } +} + +// WithHeader sets an additional default outbound header for all cloudevents +// when using an HTTP request. +func WithHeader(key, value string) Option { + return func(t *Transport) error { + if t == nil { + return fmt.Errorf("http header option can not set nil transport") + } + key = strings.TrimSpace(key) + if key != "" { + if t.Req == nil { + t.Req = &nethttp.Request{} + } + if t.Req.Header == nil { + t.Req.Header = nethttp.Header{} + } + t.Req.Header.Add(key, value) + return nil + } + return fmt.Errorf("http header option was empty string") + } +} + +// WithShutdownTimeout sets the shutdown timeout when the http server is being shutdown. +func WithShutdownTimeout(timeout time.Duration) Option { + return func(t *Transport) error { + if t == nil { + return fmt.Errorf("http shutdown timeout option can not set nil transport") + } + t.ShutdownTimeout = &timeout + return nil + } +} + +// WithEncoding sets the encoding for clients with HTTP transports. +func WithEncoding(encoding Encoding) Option { + return func(t *Transport) error { + if t == nil { + return fmt.Errorf("http encoding option can not set nil transport") + } + t.Encoding = encoding + return nil + } +} + +// WithDefaultEncodingSelector sets the encoding selection strategy for +// default encoding selections based on Event. 
+func WithDefaultEncodingSelector(fn EncodingSelector) Option { + return func(t *Transport) error { + if t == nil { + return fmt.Errorf("http default encoding selector option can not set nil transport") + } + if fn != nil { + t.DefaultEncodingSelectionFn = fn + return nil + } + return fmt.Errorf("http fn for DefaultEncodingSelector was nil") + } +} + +// WithContextBasedEncoding sets the encoding selection strategy for +// default encoding selections based context and then on Event, the encoded +// event will be the given version in the encoding specified by the given +// context, or Binary if not set. +func WithContextBasedEncoding() Option { + return func(t *Transport) error { + if t == nil { + return fmt.Errorf("http context based encoding option can not set nil transport") + } + + t.DefaultEncodingSelectionFn = ContextBasedEncodingSelectionStrategy + return nil + } +} + +// WithBinaryEncoding sets the encoding selection strategy for +// default encoding selections based on Event, the encoded event will be the +// given version in Binary form. +func WithBinaryEncoding() Option { + return func(t *Transport) error { + if t == nil { + return fmt.Errorf("http binary encoding option can not set nil transport") + } + + t.DefaultEncodingSelectionFn = DefaultBinaryEncodingSelectionStrategy + return nil + } +} + +// WithStructuredEncoding sets the encoding selection strategy for +// default encoding selections based on Event, the encoded event will be the +// given version in Structured form. 
+func WithStructuredEncoding() Option { + return func(t *Transport) error { + if t == nil { + return fmt.Errorf("http structured encoding option can not set nil transport") + } + + t.DefaultEncodingSelectionFn = DefaultStructuredEncodingSelectionStrategy + return nil + } +} + +func checkListen(t *Transport, prefix string) error { + switch { + case t.Port != nil: + return fmt.Errorf("%v port already set", prefix) + case t.listener != nil: + return fmt.Errorf("%v listener already set", prefix) + } + return nil +} + +// WithPort sets the listening port for StartReceiver. +// Only one of WithListener or WithPort is allowed. +func WithPort(port int) Option { + return func(t *Transport) error { + if t == nil { + return fmt.Errorf("http port option can not set nil transport") + } + if port < 0 { + return fmt.Errorf("http port option was given an invalid port: %d", port) + } + if err := checkListen(t, "http port option"); err != nil { + return err + } + t.setPort(port) + return nil + } +} + +// WithListener sets the listener for StartReceiver. +// Only one of WithListener or WithPort is allowed. +func WithListener(l net.Listener) Option { + return func(t *Transport) error { + if t == nil { + return fmt.Errorf("http listener option can not set nil transport") + } + if err := checkListen(t, "http port option"); err != nil { + return err + } + t.listener = l + _, err := t.listen() + return err + } +} + +// WithPath sets the path to receive cloudevents on for HTTP transports. +func WithPath(path string) Option { + return func(t *Transport) error { + if t == nil { + return fmt.Errorf("http path option can not set nil transport") + } + path = strings.TrimSpace(path) + if len(path) == 0 { + return fmt.Errorf("http path option was given an invalid path: %q", path) + } + t.Path = path + return nil + } +} + +// Middleware is a function that takes an existing http.Handler and wraps it in middleware, +// returning the wrapped http.Handler. 
+type Middleware func(next nethttp.Handler) nethttp.Handler + +// WithMiddleware adds an HTTP middleware to the transport. It may be specified multiple times. +// Middleware is applied to everything before it. For example +// `NewClient(WithMiddleware(foo), WithMiddleware(bar))` would result in `bar(foo(original))`. +func WithMiddleware(middleware Middleware) Option { + return func(t *Transport) error { + if t == nil { + return fmt.Errorf("http middleware option can not set nil transport") + } + t.middleware = append(t.middleware, middleware) + return nil + } +} + +// WithLongPollTarget sets the receivers URL to perform long polling after +// StartReceiver is called. +func WithLongPollTarget(targetUrl string) Option { + return func(t *Transport) error { + if t == nil { + return fmt.Errorf("http long poll target option can not set nil transport") + } + targetUrl = strings.TrimSpace(targetUrl) + if targetUrl != "" { + var err error + var target *url.URL + target, err = url.Parse(targetUrl) + if err != nil { + return fmt.Errorf("http long poll target option failed to parse target url: %s", err.Error()) + } + + if t.LongPollReq == nil { + t.LongPollReq = &nethttp.Request{ + Method: nethttp.MethodGet, + } + } + t.LongPollReq.URL = target + return nil + } + return fmt.Errorf("http long poll target option was empty string") + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/transport.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/transport.go new file mode 100644 index 000000000000..7a1548a4326c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/transport.go @@ -0,0 +1,649 @@ +package http + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "go.uber.org/zap" + + "github.com/cloudevents/sdk-go/pkg/cloudevents" + cecontext "github.com/cloudevents/sdk-go/pkg/cloudevents/context" + 
"github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" +) + +// Transport adheres to transport.Transport. +var _ transport.Transport = (*Transport)(nil) + +const ( + // DefaultShutdownTimeout defines the default timeout given to the http.Server when calling Shutdown. + DefaultShutdownTimeout = time.Minute * 1 + + // TransportName is the name of this transport. + TransportName = "HTTP" +) + +// Transport acts as both a http client and a http handler. +type Transport struct { + // The encoding used to select the codec for outbound events. + Encoding Encoding + + // DefaultEncodingSelectionFn allows for other encoding selection strategies to be injected. + DefaultEncodingSelectionFn EncodingSelector + + // ShutdownTimeout defines the timeout given to the http.Server when calling Shutdown. + // If nil, DefaultShutdownTimeout is used. + ShutdownTimeout *time.Duration + + // Sending + + // Client is the http client that will be used to send requests. + // If nil, the Transport will create a one. + Client *http.Client + // Req is the base http request that is used for http.Do. + // Only .Method, .URL, .Close, and .Header is considered. + // If not set, Req.Method defaults to POST. + // Req.URL or context.WithTarget(url) are required for sending. + Req *http.Request + + // Receiving + + // Receiver is invoked target for incoming events. + Receiver transport.Receiver + // Converter is invoked if the incoming transport receives an undecodable + // message. + Converter transport.Converter + // Port is the port to bind the receiver to. Defaults to 8080. + Port *int + // Path is the path to bind the receiver to. Defaults to "/". + Path string + // Handler is the handler the http Server will use. Use this to reuse the + // http server. If nil, the Transport will create a one. + Handler *http.ServeMux + + // LongPollClient is the http client that will be used to long poll. 
+ // If nil and LongPollReq is set, the Transport will create a one. + LongPollClient *http.Client + // LongPollReq is the base http request that is used for long poll. + // Only .Method, .URL, .Close, and .Header is considered. + // If not set, LongPollReq.Method defaults to GET. + // LongPollReq.URL or context.WithLongPollTarget(url) are required to long + // poll on StartReceiver. + LongPollReq *http.Request + + listener net.Listener + server *http.Server + handlerRegistered bool + codec transport.Codec + // Create Mutex + crMu sync.Mutex + // Receive Mutex + reMu sync.Mutex + + middleware []Middleware +} + +func New(opts ...Option) (*Transport, error) { + t := &Transport{ + Req: &http.Request{ + Method: http.MethodPost, + }, + } + if err := t.applyOptions(opts...); err != nil { + return nil, err + } + return t, nil +} + +func (t *Transport) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(t); err != nil { + return err + } + } + return nil +} + +func (t *Transport) loadCodec(ctx context.Context) bool { + if t.codec == nil { + t.crMu.Lock() + if t.DefaultEncodingSelectionFn != nil && t.Encoding != Default { + logger := cecontext.LoggerFrom(ctx) + logger.Warn("transport has a DefaultEncodingSelectionFn set but Encoding is not Default. 
DefaultEncodingSelectionFn will be ignored.") + + t.codec = &Codec{ + Encoding: t.Encoding, + } + } else { + t.codec = &Codec{ + Encoding: t.Encoding, + DefaultEncodingSelectionFn: t.DefaultEncodingSelectionFn, + } + } + t.crMu.Unlock() + } + return true +} + +func copyHeaders(from, to http.Header) { + if from == nil || to == nil { + return + } + for header, values := range from { + for _, value := range values { + to.Add(header, value) + } + } +} + +// Ensure to is a non-nil map before copying +func copyHeadersEnsure(from http.Header, to *http.Header) { + if len(from) > 0 { + if *to == nil { + *to = http.Header{} + } + copyHeaders(from, *to) + } +} + +// Send implements Transport.Send +func (t *Transport) Send(ctx context.Context, event cloudevents.Event) (context.Context, *cloudevents.Event, error) { + ctx, r := observability.NewReporter(ctx, reportSend) + rctx, resp, err := t.obsSend(ctx, event) + if err != nil { + r.Error() + } else { + r.OK() + } + return rctx, resp, err +} + +func (t *Transport) obsSend(ctx context.Context, event cloudevents.Event) (context.Context, *cloudevents.Event, error) { + if t.Client == nil { + t.crMu.Lock() + t.Client = &http.Client{} + t.crMu.Unlock() + } + + req := http.Request{ + Header: HeaderFrom(ctx), + } + if t.Req != nil { + req.Method = t.Req.Method + req.URL = t.Req.URL + req.Close = t.Req.Close + copyHeadersEnsure(t.Req.Header, &req.Header) + } + + // Override the default request with target from context. 
+ if target := cecontext.TargetFrom(ctx); target != nil { + req.URL = target + } + + if ok := t.loadCodec(ctx); !ok { + return WithTransportContext(ctx, NewTransportContextFromResponse(nil)), nil, fmt.Errorf("unknown encoding set on transport: %d", t.Encoding) + } + + msg, err := t.codec.Encode(ctx, event) + if err != nil { + return WithTransportContext(ctx, NewTransportContextFromResponse(nil)), nil, err + } + + if m, ok := msg.(*Message); ok { + m.ToRequest(&req) + return httpDo(ctx, t.Client, &req, func(resp *http.Response, err error) (context.Context, *cloudevents.Event, error) { + rctx := WithTransportContext(ctx, NewTransportContextFromResponse(resp)) + if err != nil { + return rctx, nil, err + } + defer resp.Body.Close() + + body, _ := ioutil.ReadAll(resp.Body) + respEvent, err := t.MessageToEvent(ctx, &Message{ + Header: resp.Header, + Body: body, + }) + if err != nil { + isErr := true + if txerr, ok := err.(*transport.ErrTransportMessageConversion); ok { + if !txerr.IsFatal() { + isErr = false + } + } + if isErr { + return rctx, nil, err + } + } + if accepted(resp) { + return rctx, respEvent, nil + } + return rctx, respEvent, fmt.Errorf("error sending cloudevent: %s", resp.Status) + }) + } + return WithTransportContext(ctx, NewTransportContextFromResponse(nil)), nil, fmt.Errorf("failed to encode Event into a Message") +} + +func (t *Transport) MessageToEvent(ctx context.Context, msg *Message) (*cloudevents.Event, error) { + logger := cecontext.LoggerFrom(ctx) + var event *cloudevents.Event + var err error + + if msg.CloudEventsVersion() != "" { + // This is likely a cloudevents encoded message, try to decode it. 
+ if ok := t.loadCodec(ctx); !ok { + err = transport.NewErrTransportMessageConversion("http", fmt.Sprintf("unknown encoding set on transport: %d", t.Encoding), true) + logger.Error("failed to load codec", zap.Error(err)) + } else { + event, err = t.codec.Decode(ctx, msg) + } + } else { + err = transport.NewErrTransportMessageConversion("http", "cloudevents version unknown", false) + } + + // If codec returns and error, or could not load the correct codec, try + // with the converter if it is set. + if err != nil && t.HasConverter() { + event, err = t.Converter.Convert(ctx, msg, err) + } + // If err is still set, it means that there was no converter, or the + // converter failed to convert. + if err != nil { + logger.Debug("failed to decode message", zap.Error(err)) + } + + return event, err +} + +// SetReceiver implements Transport.SetReceiver +func (t *Transport) SetReceiver(r transport.Receiver) { + t.Receiver = r +} + +// SetConverter implements Transport.SetConverter +func (t *Transport) SetConverter(c transport.Converter) { + t.Converter = c +} + +// HasConverter implements Transport.HasConverter +func (t *Transport) HasConverter() bool { + return t.Converter != nil +} + +// StartReceiver implements Transport.StartReceiver +// NOTE: This is a blocking call. +func (t *Transport) StartReceiver(ctx context.Context) error { + t.reMu.Lock() + defer t.reMu.Unlock() + + if t.LongPollReq != nil { + go func() { _ = t.longPollStart(ctx) }() + } + + if t.Handler == nil { + t.Handler = http.NewServeMux() + } + if !t.handlerRegistered { + // handler.Handle might panic if the user tries to use the same path as the sdk. 
+ t.Handler.Handle(t.GetPath(), t) + t.handlerRegistered = true + } + + addr, err := t.listen() + if err != nil { + return err + } + + t.server = &http.Server{ + Addr: addr.String(), + Handler: attachMiddleware(t.Handler, t.middleware), + } + + // Shutdown + defer func() { + t.server.Close() + t.server = nil + }() + + errChan := make(chan error, 1) + go func() { + errChan <- t.server.Serve(t.listener) + }() + + // wait for the server to return or ctx.Done(). + select { + case <-ctx.Done(): + // Try a gracefully shutdown. + timeout := DefaultShutdownTimeout + if t.ShutdownTimeout != nil { + timeout = *t.ShutdownTimeout + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := t.server.Shutdown(ctx) + <-errChan // Wait for server goroutine to exit + return err + case err := <-errChan: + return err + } +} + +func (t *Transport) longPollStart(ctx context.Context) error { + logger := cecontext.LoggerFrom(ctx) + logger.Info("starting long poll receiver") + + if t.LongPollClient == nil { + t.crMu.Lock() + t.LongPollClient = &http.Client{} + t.crMu.Unlock() + } + req := &http.Request{ + // TODO: decide if it is ok to use HeaderFrom context here. + Header: HeaderFrom(ctx), + } + if t.LongPollReq != nil { + req.Method = t.LongPollReq.Method + req.URL = t.LongPollReq.URL + req.Close = t.LongPollReq.Close + copyHeaders(t.LongPollReq.Header, req.Header) + } + + // Override the default request with target from context. 
+ if target := LongPollTargetFrom(ctx); target != nil { + req.URL = target + } + + if req.URL == nil { + return errors.New("no long poll target found") + } + + req = req.WithContext(ctx) + msgCh := make(chan Message) + defer close(msgCh) + + go func(ch chan<- Message) { + for { + if resp, err := t.LongPollClient.Do(req); err != nil { + logger.Errorw("long poll request returned error", err) + uErr := err.(*url.Error) + if uErr.Temporary() || uErr.Timeout() { + continue + } + // TODO: if the transport is throwing errors, we might want to try again. Maybe with a back-off sleep. + // But this error also might be that there was a done on the context. + } else if resp.StatusCode == http.StatusNotModified { + // Keep polling. + continue + } else if resp.StatusCode == http.StatusOK { + body, _ := ioutil.ReadAll(resp.Body) + if err := resp.Body.Close(); err != nil { + logger.Warnw("error closing long poll response body", zap.Error(err)) + } + msg := Message{ + Header: resp.Header, + Body: body, + } + msgCh <- msg + } else { + // TODO: not sure what to do with upstream errors yet. + logger.Errorw("unhandled long poll response", zap.Any("resp", resp)) + } + } + }(msgCh) + + // Attach the long poll request context to the context. + ctx = WithTransportContext(ctx, TransportContext{ + URI: req.URL.RequestURI(), + Host: req.URL.Host, + Method: req.Method, + }) + + for { + select { + case <-ctx.Done(): + return nil + case msg := <-msgCh: + logger.Debug("got a message", zap.Any("msg", msg)) + if event, err := t.MessageToEvent(ctx, &msg); err != nil { + logger.Errorw("could not convert http message to event", zap.Error(err)) + } else { + logger.Debugw("got an event", zap.Any("event", event)) + // TODO: deliver event. + if _, err := t.invokeReceiver(ctx, *event); err != nil { + logger.Errorw("could not invoke receiver event", zap.Error(err)) + } + } + } + } +} + +// attachMiddleware attaches the HTTP middleware to the specified handler. 
+func attachMiddleware(h http.Handler, middleware []Middleware) http.Handler { + for _, m := range middleware { + h = m(h) + } + return h +} + +type eventError struct { + ctx context.Context + event *cloudevents.Event + err error +} + +func httpDo(ctx context.Context, client *http.Client, req *http.Request, fn func(*http.Response, error) (context.Context, *cloudevents.Event, error)) (context.Context, *cloudevents.Event, error) { + // Run the HTTP request in a goroutine and pass the response to fn. + c := make(chan eventError, 1) + req = req.WithContext(ctx) + go func() { + rctx, event, err := fn(client.Do(req)) + c <- eventError{ctx: rctx, event: event, err: err} + }() + select { + case <-ctx.Done(): + return ctx, nil, ctx.Err() + case ee := <-c: + return ee.ctx, ee.event, ee.err + } +} + +// accepted is a helper method to understand if the response from the target +// accepted the CloudEvent. +func accepted(resp *http.Response) bool { + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + return true + } + return false +} + +func (t *Transport) invokeReceiver(ctx context.Context, event cloudevents.Event) (*Response, error) { + ctx, r := observability.NewReporter(ctx, reportReceive) + resp, err := t.obsInvokeReceiver(ctx, event) + if err != nil { + r.Error() + } else { + r.OK() + } + return resp, err +} + +func (t *Transport) obsInvokeReceiver(ctx context.Context, event cloudevents.Event) (*Response, error) { + logger := cecontext.LoggerFrom(ctx) + if t.Receiver != nil { + // Note: http does not use eventResp.Reason + eventResp := cloudevents.EventResponse{} + resp := Response{} + + err := t.Receiver.Receive(ctx, event, &eventResp) + if err != nil { + logger.Warnw("got an error from receiver fn", zap.Error(err)) + resp.StatusCode = http.StatusInternalServerError + return &resp, err + } + + if eventResp.Event != nil { + if t.loadCodec(ctx) { + if m, err := t.codec.Encode(ctx, *eventResp.Event); err != nil { + logger.Errorw("failed to encode response from receiver 
fn", zap.Error(err)) + } else if msg, ok := m.(*Message); ok { + resp.Message = *msg + } + } else { + logger.Error("failed to load codec") + resp.StatusCode = http.StatusInternalServerError + return &resp, err + } + // Look for a transport response context + var trx *TransportResponseContext + if ptrTrx, ok := eventResp.Context.(*TransportResponseContext); ok { + // found a *TransportResponseContext, use it. + trx = ptrTrx + } else if realTrx, ok := eventResp.Context.(TransportResponseContext); ok { + // found a TransportResponseContext, make it a pointer. + trx = &realTrx + } + // If we found a TransportResponseContext, use it. + if trx != nil && trx.Header != nil && len(trx.Header) > 0 { + copyHeadersEnsure(trx.Header, &resp.Message.Header) + } + } + + if eventResp.Status != 0 { + resp.StatusCode = eventResp.Status + } else { + resp.StatusCode = http.StatusAccepted // default is 202 - Accepted + } + return &resp, err + } + return nil, nil +} + +// ServeHTTP implements http.Handler +func (t *Transport) ServeHTTP(w http.ResponseWriter, req *http.Request) { + ctx, r := observability.NewReporter(req.Context(), reportServeHTTP) + // Add the transport context to ctx. 
+ ctx = WithTransportContext(ctx, NewTransportContext(req)) + logger := cecontext.LoggerFrom(ctx) + + body, err := ioutil.ReadAll(req.Body) + if err != nil { + logger.Errorw("failed to handle request", zap.Error(err)) + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write([]byte(`{"error":"Invalid request"}`)) + r.Error() + return + } + + event, err := t.MessageToEvent(ctx, &Message{ + Header: req.Header, + Body: body, + }) + if err != nil { + isFatal := true + if txerr, ok := err.(*transport.ErrTransportMessageConversion); ok { + isFatal = txerr.IsFatal() + } + if isFatal || event == nil { + logger.Errorw("failed to convert http message to event", zap.Error(err)) + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write([]byte(fmt.Sprintf(`{"error":%q}`, err.Error()))) + r.Error() + return + } + } + + resp, err := t.invokeReceiver(ctx, *event) + if err != nil { + logger.Warnw("error returned from invokeReceiver", zap.Error(err)) + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write([]byte(fmt.Sprintf(`{"error":%q}`, err.Error()))) + r.Error() + return + } + + if resp != nil { + if t.Req != nil { + copyHeaders(t.Req.Header, w.Header()) + } + if len(resp.Message.Header) > 0 { + copyHeaders(resp.Message.Header, w.Header()) + } + + status := http.StatusAccepted + if resp.StatusCode >= 200 && resp.StatusCode < 600 { + status = resp.StatusCode + } + w.Header().Add("Content-Length", strconv.Itoa(len(resp.Message.Body))) + w.WriteHeader(status) + + if len(resp.Message.Body) > 0 { + if _, err := w.Write(resp.Message.Body); err != nil { + r.Error() + return + } + } + + r.OK() + return + } + + w.WriteHeader(http.StatusNoContent) + r.OK() +} + +// GetPort returns the listening port. +// Returns -1 if there is a listening error. +// Note this will call net.Listen() if the listener is not already started. +func (t *Transport) GetPort() int { + // Ensure we have a listener and therefore a port. 
+ if _, err := t.listen(); err == nil || t.Port != nil { + return *t.Port + } + return -1 +} + +func (t *Transport) setPort(port int) { + if t.Port == nil { + t.Port = new(int) + } + *t.Port = port +} + +// listen if not already listening, update t.Port +func (t *Transport) listen() (net.Addr, error) { + if t.listener == nil { + port := 8080 + if t.Port != nil { + port = *t.Port + } + var err error + if t.listener, err = net.Listen("tcp", fmt.Sprintf(":%d", port)); err != nil { + return nil, err + } + } + addr := t.listener.Addr() + if tcpAddr, ok := addr.(*net.TCPAddr); ok { + t.setPort(tcpAddr.Port) + } + return addr, nil +} + +// GetPath returns the path the transport is hosted on. If the path is '/', +// the transport will handle requests on any URI. To discover the true path +// a request was received on, inspect the context from Receive(cxt, ...) with +// TransportContextFrom(ctx). +func (t *Transport) GetPath() string { + path := strings.TrimSpace(t.Path) + if len(path) > 0 { + return path + } + return "/" // default +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/message.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/message.go new file mode 100644 index 000000000000..e2ed55c970fd --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/message.go @@ -0,0 +1,9 @@ +package transport + +// Message is the abstract transport message wrapper. +type Message interface { + // CloudEventsVersion returns the version of the CloudEvent. 
+ CloudEventsVersion() string + + // TODO maybe get encoding +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/transport.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/transport.go new file mode 100644 index 000000000000..a08d5a12e525 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/transport.go @@ -0,0 +1,44 @@ +package transport + +import ( + "context" + + "github.com/cloudevents/sdk-go/pkg/cloudevents" +) + +// Transport is the interface for transport sender to send the converted Message +// over the underlying transport. +type Transport interface { + Send(context.Context, cloudevents.Event) (context.Context, *cloudevents.Event, error) + + SetReceiver(Receiver) + StartReceiver(context.Context) error + + // SetConverter sets the delegate to use for converting messages that have + // failed to be decoded from known codecs for this transport. + SetConverter(Converter) + // HasConverter is true when a non-nil converter has been set. + HasConverter() bool +} + +// Receiver is an interface to define how a transport will invoke a listener +// of incoming events. +type Receiver interface { + Receive(context.Context, cloudevents.Event, *cloudevents.EventResponse) error +} + +// ReceiveFunc wraps a function as a Receiver object. +type ReceiveFunc func(ctx context.Context, e cloudevents.Event, er *cloudevents.EventResponse) error + +// Receive implements Receiver.Receive +func (f ReceiveFunc) Receive(ctx context.Context, e cloudevents.Event, er *cloudevents.EventResponse) error { + return f(ctx, e, er) +} + +// Converter is an interface to define how a transport delegate to convert an +// non-understood transport message from the internal codecs. Providing a +// Converter allows incoming requests to be bridged to CloudEvents format if +// they have not been sent as an event in CloudEvents format. 
+type Converter interface { + Convert(context.Context, Message, error) (*cloudevents.Event, error) +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/allocate.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/allocate.go new file mode 100644 index 000000000000..c38f71177015 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/allocate.go @@ -0,0 +1,36 @@ +package types + +import "reflect" + +// Allocate allocates a new instance of type t and returns: +// asPtr is of type t if t is a pointer type and of type &t otherwise +// asValue is a Value of type t pointing to the same data as asPtr +func Allocate(obj interface{}) (asPtr interface{}, asValue reflect.Value) { + if obj == nil { + return nil, reflect.Value{} + } + + switch t := reflect.TypeOf(obj); t.Kind() { + case reflect.Ptr: + reflectPtr := reflect.New(t.Elem()) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + case reflect.Map: + reflectPtr := reflect.MakeMap(t) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + case reflect.String: + reflectPtr := reflect.New(t) + asPtr = "" + asValue = reflectPtr.Elem() + case reflect.Slice: + reflectPtr := reflect.MakeSlice(t, 0, 0) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + default: + reflectPtr := reflect.New(t) + asPtr = reflectPtr.Interface() + asValue = reflectPtr.Elem() + } + return +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/doc.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/doc.go new file mode 100644 index 000000000000..1019b4a2dd29 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/doc.go @@ -0,0 +1,4 @@ +/* +Package types provides custom types to support CloudEvents. 
+*/ +package types diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/timestamp.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/timestamp.go new file mode 100644 index 000000000000..6534aacbb502 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/timestamp.go @@ -0,0 +1,83 @@ +package types + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "time" +) + +// Timestamp wraps time.Time to normalize the time layout to RFC3339. It is +// intended to enforce compliance with the CloudEvents spec for their +// definition of Timestamp. Custom marshal methods are implemented to ensure +// the outbound Timestamp is a string in the RFC3339 layout. +type Timestamp struct { + time.Time +} + +// ParseTimestamp attempts to parse the given time assuming RFC3339 layout +func ParseTimestamp(t string) *Timestamp { + if t == "" { + return nil + } + timestamp, err := time.Parse(time.RFC3339Nano, t) + if err != nil { + return nil + } + return &Timestamp{Time: timestamp} +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (t *Timestamp) MarshalJSON() ([]byte, error) { + if t == nil || t.IsZero() { + return []byte(`""`), nil + } + rfc3339 := fmt.Sprintf("%q", t.UTC().Format(time.RFC3339Nano)) + return []byte(rfc3339), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. +func (t *Timestamp) UnmarshalJSON(b []byte) error { + var timestamp string + if err := json.Unmarshal(b, ×tamp); err != nil { + return err + } + if pt := ParseTimestamp(timestamp); pt != nil { + *t = *pt + } + return nil +} + +// MarshalXML implements a custom xml marshal method used when this type is +// marshaled using xml.Marshal. 
+func (t *Timestamp) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if t == nil || t.IsZero() { + return e.EncodeElement(nil, start) + } + v := t.UTC().Format(time.RFC3339Nano) + return e.EncodeElement(v, start) +} + +// UnmarshalXML implements the xml unmarshal method used when this type is +// unmarshaled using xml.Unmarshal. +func (t *Timestamp) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var timestamp string + if err := d.DecodeElement(×tamp, &start); err != nil { + return err + } + if pt := ParseTimestamp(timestamp); pt != nil { + *t = *pt + } + return nil +} + +// String outputs the time using layout RFC3339. +func (t *Timestamp) String() string { + if t == nil { + return time.Time{}.UTC().Format(time.RFC3339Nano) + } + + return t.UTC().Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/urlref.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/urlref.go new file mode 100644 index 000000000000..2743c45e2b9c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/urlref.go @@ -0,0 +1,77 @@ +package types + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "net/url" +) + +// URLRef is a wrapper to url.URL. It is intended to enforce compliance with +// the CloudEvents spec for their definition of URI-Reference. Custom +// marshal methods are implemented to ensure the outbound URLRef object is +// is a flat string. +type URLRef struct { + url.URL +} + +// ParseURLRef attempts to parse the given string as a URI-Reference. +func ParseURLRef(u string) *URLRef { + if u == "" { + return nil + } + pu, err := url.Parse(u) + if err != nil { + return nil + } + return &URLRef{URL: *pu} +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. 
+func (u URLRef) MarshalJSON() ([]byte, error) { + b := fmt.Sprintf("%q", u.String()) + return []byte(b), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. +func (u *URLRef) UnmarshalJSON(b []byte) error { + var ref string + if err := json.Unmarshal(b, &ref); err != nil { + return err + } + r := ParseURLRef(ref) + if r != nil { + *u = *r + } + return nil +} + +// MarshalXML implements a custom xml marshal method used when this type is +// marshaled using xml.Marshal. +func (u URLRef) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + return e.EncodeElement(u.String(), start) +} + +// UnmarshalXML implements the xml unmarshal method used when this type is +// unmarshaled using xml.Unmarshal. +func (u *URLRef) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var ref string + if err := d.DecodeElement(&ref, &start); err != nil { + return err + } + r := ParseURLRef(ref) + if r != nil { + *u = *r + } + return nil +} + +// String returns the full string representation of the URI-Reference. +func (u *URLRef) String() string { + if u == nil { + return "" + } + return u.URL.String() +} diff --git a/vendor/go.opencensus.io/AUTHORS b/vendor/go.opencensus.io/AUTHORS new file mode 100644 index 000000000000..e491a9e7f783 --- /dev/null +++ b/vendor/go.opencensus.io/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/go.opencensus.io/LICENSE b/vendor/go.opencensus.io/LICENSE new file mode 100644 index 000000000000..7a4a3ea2424c --- /dev/null +++ b/vendor/go.opencensus.io/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go new file mode 100644 index 000000000000..81dc7183ec39 --- /dev/null +++ b/vendor/go.opencensus.io/internal/internal.go @@ -0,0 +1,37 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opencensus.io/internal" + +import ( + "fmt" + "time" + + opencensus "go.opencensus.io" +) + +// UserAgent is the user agent to be added to the outgoing +// requests from the exporters. +var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version()) + +// MonotonicEndTime returns the end time at present +// but offset from start, monotonically. +// +// The monotonic clock is used in subtractions hence +// the duration since start added back to start gives +// end as a monotonic time. +// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks +func MonotonicEndTime(start time.Time) time.Time { + return start.Add(time.Since(start)) +} diff --git a/vendor/go.opencensus.io/internal/sanitize.go b/vendor/go.opencensus.io/internal/sanitize.go new file mode 100644 index 000000000000..de8ccf236c4b --- /dev/null +++ b/vendor/go.opencensus.io/internal/sanitize.go @@ -0,0 +1,50 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "strings" + "unicode" +) + +const labelKeySizeLimit = 100 + +// Sanitize returns a string that is trunacated to 100 characters if it's too +// long, and replaces non-alphanumeric characters to underscores. 
+func Sanitize(s string) string { + if len(s) == 0 { + return s + } + if len(s) > labelKeySizeLimit { + s = s[:labelKeySizeLimit] + } + s = strings.Map(sanitizeRune, s) + if unicode.IsDigit(rune(s[0])) { + s = "key_" + s + } + if s[0] == '_' { + s = "key" + s + } + return s +} + +// converts anything that is not a letter or digit to an underscore +func sanitizeRune(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + // Everything else turns into an underscore + return '_' +} diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go new file mode 100644 index 000000000000..41b2c3fc0387 --- /dev/null +++ b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go @@ -0,0 +1,75 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package tagencoding contains the tag encoding +// used interally by the stats collector. +package tagencoding // import "go.opencensus.io/internal/tagencoding" + +// Values represent the encoded buffer for the values. +type Values struct { + Buffer []byte + WriteIndex int + ReadIndex int +} + +func (vb *Values) growIfRequired(expected int) { + if len(vb.Buffer)-vb.WriteIndex < expected { + tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected) + copy(tmp, vb.Buffer) + vb.Buffer = tmp + } +} + +// WriteValue is the helper method to encode Values from map[Key][]byte. 
+func (vb *Values) WriteValue(v []byte) { + length := len(v) & 0xff + vb.growIfRequired(1 + length) + + // writing length of v + vb.Buffer[vb.WriteIndex] = byte(length) + vb.WriteIndex++ + + if length == 0 { + // No value was encoded for this key + return + } + + // writing v + copy(vb.Buffer[vb.WriteIndex:], v[:length]) + vb.WriteIndex += length +} + +// ReadValue is the helper method to decode Values to a map[Key][]byte. +func (vb *Values) ReadValue() []byte { + // read length of v + length := int(vb.Buffer[vb.ReadIndex]) + vb.ReadIndex++ + if length == 0 { + // No value was encoded for this key + return nil + } + + // read value of v + v := make([]byte, length) + endIdx := vb.ReadIndex + length + copy(v, vb.Buffer[vb.ReadIndex:endIdx]) + vb.ReadIndex = endIdx + return v +} + +// Bytes returns a reference to already written bytes in the Buffer. +func (vb *Values) Bytes() []byte { + return vb.Buffer[:vb.WriteIndex] +} diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go new file mode 100644 index 000000000000..073af7b473a6 --- /dev/null +++ b/vendor/go.opencensus.io/internal/traceinternals.go @@ -0,0 +1,53 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "time" +) + +// Trace allows internal access to some trace functionality. 
+// TODO(#412): remove this +var Trace interface{} + +// LocalSpanStoreEnabled true if the local span store is enabled. +var LocalSpanStoreEnabled bool + +// BucketConfiguration stores the number of samples to store for span buckets +// for successful and failed spans for a particular span name. +type BucketConfiguration struct { + Name string + MaxRequestsSucceeded int + MaxRequestsErrors int +} + +// PerMethodSummary is a summary of the spans stored for a single span name. +type PerMethodSummary struct { + Active int + LatencyBuckets []LatencyBucketSummary + ErrorBuckets []ErrorBucketSummary +} + +// LatencyBucketSummary is a summary of a latency bucket. +type LatencyBucketSummary struct { + MinLatency, MaxLatency time.Duration + Size int +} + +// ErrorBucketSummary is a summary of an error bucket. +type ErrorBucketSummary struct { + ErrorCode int32 + Size int +} diff --git a/vendor/go.opencensus.io/metric/metricdata/doc.go b/vendor/go.opencensus.io/metric/metricdata/doc.go new file mode 100644 index 000000000000..52a7b3bf8509 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metricdata contains the metrics data model. +// +// This is an EXPERIMENTAL package, and may change in arbitrary ways without +// notice. 
+package metricdata // import "go.opencensus.io/metric/metricdata" diff --git a/vendor/go.opencensus.io/metric/metricdata/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/exemplar.go new file mode 100644 index 000000000000..12695ce2dc74 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/exemplar.go @@ -0,0 +1,38 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" +) + +// Exemplars keys. +const ( + AttachmentKeySpanContext = "SpanContext" +) + +// Exemplar is an example data point associated with each bucket of a +// distribution type aggregation. +// +// Their purpose is to provide an example of the kind of thing +// (request, RPC, trace span, etc.) that resulted in that measurement. +type Exemplar struct { + Value float64 // the value that was recorded + Timestamp time.Time // the time the value was recorded + Attachments Attachments // attachments (if any) +} + +// Attachments is a map of extra values associated with a recorded data point. 
+type Attachments map[string]interface{} diff --git a/vendor/go.opencensus.io/metric/metricdata/label.go b/vendor/go.opencensus.io/metric/metricdata/label.go new file mode 100644 index 000000000000..aadae41e6a21 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/label.go @@ -0,0 +1,35 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +// LabelKey represents key of a label. It has optional +// description attribute. +type LabelKey struct { + Key string + Description string +} + +// LabelValue represents the value of a label. +// The zero value represents a missing label value, which may be treated +// differently to an empty string value by some back ends. +type LabelValue struct { + Value string // string value of the label + Present bool // flag that indicated whether a value is present or not +} + +// NewLabelValue creates a new non-nil LabelValue that represents the given string. 
+func NewLabelValue(val string) LabelValue { + return LabelValue{Value: val, Present: true} +} diff --git a/vendor/go.opencensus.io/metric/metricdata/metric.go b/vendor/go.opencensus.io/metric/metricdata/metric.go new file mode 100644 index 000000000000..8293712c77f0 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/metric.go @@ -0,0 +1,46 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" + + "go.opencensus.io/resource" +) + +// Descriptor holds metadata about a metric. +type Descriptor struct { + Name string // full name of the metric + Description string // human-readable description + Unit Unit // units for the measure + Type Type // type of measure + LabelKeys []LabelKey // label keys +} + +// Metric represents a quantity measured against a resource with different +// label value combinations. +type Metric struct { + Descriptor Descriptor // metric descriptor + Resource *resource.Resource // resource against which this was measured + TimeSeries []*TimeSeries // one time series for each combination of label values +} + +// TimeSeries is a sequence of points associated with a combination of label +// values. 
+type TimeSeries struct { + LabelValues []LabelValue // label values, same order as keys in the metric descriptor + Points []Point // points sequence + StartTime time.Time // time we started recording this time series +} diff --git a/vendor/go.opencensus.io/metric/metricdata/point.go b/vendor/go.opencensus.io/metric/metricdata/point.go new file mode 100644 index 000000000000..7fe057b19cf7 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/point.go @@ -0,0 +1,193 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" +) + +// Point is a single data point of a time series. +type Point struct { + // Time is the point in time that this point represents in a time series. + Time time.Time + // Value is the value of this point. Prefer using ReadValue to switching on + // the value type, since new value types might be added. + Value interface{} +} + +//go:generate stringer -type ValueType + +// NewFloat64Point creates a new Point holding a float64 value. +func NewFloat64Point(t time.Time, val float64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewInt64Point creates a new Point holding an int64 value. +func NewInt64Point(t time.Time, val int64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewDistributionPoint creates a new Point holding a Distribution value. 
+func NewDistributionPoint(t time.Time, val *Distribution) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewSummaryPoint creates a new Point holding a Summary value. +func NewSummaryPoint(t time.Time, val *Summary) Point { + return Point{ + Value: val, + Time: t, + } +} + +// ValueVisitor allows reading the value of a point. +type ValueVisitor interface { + VisitFloat64Value(float64) + VisitInt64Value(int64) + VisitDistributionValue(*Distribution) + VisitSummaryValue(*Summary) +} + +// ReadValue accepts a ValueVisitor and calls the appropriate method with the +// value of this point. +// Consumers of Point should use this in preference to switching on the type +// of the value directly, since new value types may be added. +func (p Point) ReadValue(vv ValueVisitor) { + switch v := p.Value.(type) { + case int64: + vv.VisitInt64Value(v) + case float64: + vv.VisitFloat64Value(v) + case *Distribution: + vv.VisitDistributionValue(v) + case *Summary: + vv.VisitSummaryValue(v) + default: + panic("unexpected value type") + } +} + +// Distribution contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those +// values across a set of buckets. +type Distribution struct { + // Count is the number of values in the population. Must be non-negative. This value + // must equal the sum of the values in bucket_counts if a histogram is + // provided. + Count int64 + // Sum is the sum of the values in the population. If count is zero then this field + // must be zero. + Sum float64 + // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If count is zero then this field must be zero. 
+ SumOfSquaredDeviation float64 + // BucketOptions describes the bounds of the histogram buckets in this + // distribution. + // + // A Distribution may optionally contain a histogram of the values in the + // population. + // + // If nil, there is no associated histogram. + BucketOptions *BucketOptions + // Bucket If the distribution does not have a histogram, then omit this field. + // If there is a histogram, then the sum of the values in the Bucket counts + // must equal the value in the count field of the distribution. + Buckets []Bucket +} + +// BucketOptions describes the bounds of the histogram buckets in this +// distribution. +type BucketOptions struct { + // Bounds specifies a set of bucket upper bounds. + // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket + // index i are: + // + // [0, Bounds[i]) for i == 0 + // [Bounds[i-1], Bounds[i]) for 0 < i < N-1 + // [Bounds[i-1], +infinity) for i == N-1 + Bounds []float64 +} + +// Bucket represents a single bucket (value range) in a distribution. +type Bucket struct { + // Count is the number of values in each bucket of the histogram, as described in + // bucket_bounds. + Count int64 + // Exemplar associated with this bucket (if any). + Exemplar *Exemplar +} + +// Summary is a representation of percentiles. +type Summary struct { + // Count is the cumulative count (if available). + Count int64 + // Sum is the cumulative sum of values (if available). + Sum float64 + // HasCountAndSum is true if Count and Sum are available. + HasCountAndSum bool + // Snapshot represents percentiles calculated over an arbitrary time window. + // The values in this struct can be reset at arbitrary unknown times, with + // the requirement that all of them are reset at the same time. + Snapshot Snapshot +} + +// Snapshot represents percentiles over an arbitrary time. +// The values in this struct can be reset at arbitrary unknown times, with +// the requirement that all of them are reset at the same time. 
+type Snapshot struct { + // Count is the number of values in the snapshot. Optional since some systems don't + // expose this. Set to 0 if not available. + Count int64 + // Sum is the sum of values in the snapshot. Optional since some systems don't + // expose this. If count is 0 then this field must be zero. + Sum float64 + // Percentiles is a map from percentile (range (0-100.0]) to the value of + // the percentile. + Percentiles map[float64]float64 +} + +//go:generate stringer -type Type + +// Type is the overall type of metric, including its value type and whether it +// represents a cumulative total (since the start time) or if it represents a +// gauge value. +type Type int + +// Metric types. +const ( + TypeGaugeInt64 Type = iota + TypeGaugeFloat64 + TypeGaugeDistribution + TypeCumulativeInt64 + TypeCumulativeFloat64 + TypeCumulativeDistribution + TypeSummary +) diff --git a/vendor/go.opencensus.io/metric/metricdata/type_string.go b/vendor/go.opencensus.io/metric/metricdata/type_string.go new file mode 100644 index 000000000000..c3f8ec27b53c --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/type_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type Type"; DO NOT EDIT. 
+ +package metricdata + +import "strconv" + +const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary" + +var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/vendor/go.opencensus.io/metric/metricdata/unit.go b/vendor/go.opencensus.io/metric/metricdata/unit.go new file mode 100644 index 000000000000..b483a1371b09 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/unit.go @@ -0,0 +1,27 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +// Unit is a string encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +type Unit string + +// Predefined units. To record against a unit not represented here, create your +// own Unit type constant from a string. 
+const ( + UnitDimensionless Unit = "1" + UnitBytes Unit = "By" + UnitMilliseconds Unit = "ms" +) diff --git a/vendor/go.opencensus.io/metric/metricproducer/manager.go b/vendor/go.opencensus.io/metric/metricproducer/manager.go new file mode 100644 index 000000000000..ca1f39049385 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricproducer/manager.go @@ -0,0 +1,78 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricproducer + +import ( + "sync" +) + +// Manager maintains a list of active producers. Producers can register +// with the manager to allow readers to read all metrics provided by them. +// Readers can retrieve all producers registered with the manager, +// read metrics from the producers and export them. +type Manager struct { + mu sync.RWMutex + producers map[Producer]struct{} +} + +var prodMgr *Manager +var once sync.Once + +// GlobalManager is a single instance of producer manager +// that is used by all producers and all readers. +func GlobalManager() *Manager { + once.Do(func() { + prodMgr = &Manager{} + prodMgr.producers = make(map[Producer]struct{}) + }) + return prodMgr +} + +// AddProducer adds the producer to the Manager if it is not already present. 
+func (pm *Manager) AddProducer(producer Producer) {
+	if producer == nil {
+		return
+	}
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+	pm.producers[producer] = struct{}{}
+}
+
+// DeleteProducer deletes the producer from the Manager if it is present.
+func (pm *Manager) DeleteProducer(producer Producer) {
+	if producer == nil {
+		return
+	}
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+	delete(pm.producers, producer)
+}
+
+// GetAll returns a slice of all producers currently registered with
+// the Manager. For each call it generates a new slice. The slice
+// should not be cached as registration may change at any time. It is
+// typically called periodically by exporters to read metrics from
+// the producers.
+func (pm *Manager) GetAll() []Producer {
+	pm.mu.RLock()
+	defer pm.mu.RUnlock()
+	producers := make([]Producer, len(pm.producers))
+	i := 0
+	for producer := range pm.producers {
+		producers[i] = producer
+		i++
+	}
+	return producers
+}
diff --git a/vendor/go.opencensus.io/metric/metricproducer/producer.go b/vendor/go.opencensus.io/metric/metricproducer/producer.go
new file mode 100644
index 000000000000..6cee9ed17833
--- /dev/null
+++ b/vendor/go.opencensus.io/metric/metricproducer/producer.go
@@ -0,0 +1,28 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricproducer
+
+import (
+	"go.opencensus.io/metric/metricdata"
+)
+
+// Producer is a source of metrics.
+type Producer interface { + // Read should return the current values of all metrics supported by this + // metric provider. + // The returned metrics should be unique for each combination of name and + // resource. + Read() []*metricdata.Metric +} diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go new file mode 100644 index 000000000000..e5e4b4368c1a --- /dev/null +++ b/vendor/go.opencensus.io/opencensus.go @@ -0,0 +1,21 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package opencensus contains Go support for OpenCensus. +package opencensus // import "go.opencensus.io" + +// Version is the current release version of OpenCensus in use. +func Version() string { + return "0.23.0" +} diff --git a/vendor/go.opencensus.io/resource/resource.go b/vendor/go.opencensus.io/resource/resource.go new file mode 100644 index 000000000000..b1764e1d3b94 --- /dev/null +++ b/vendor/go.opencensus.io/resource/resource.go @@ -0,0 +1,164 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package resource provides functionality for resource, which capture +// identifying information about the entities for which signals are exported. +package resource + +import ( + "context" + "fmt" + "os" + "regexp" + "sort" + "strconv" + "strings" +) + +// Environment variables used by FromEnv to decode a resource. +const ( + EnvVarType = "OC_RESOURCE_TYPE" + EnvVarLabels = "OC_RESOURCE_LABELS" +) + +// Resource describes an entity about which identifying information and metadata is exposed. +// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace. +type Resource struct { + Type string + Labels map[string]string +} + +// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable. +func EncodeLabels(labels map[string]string) string { + sortedKeys := make([]string, 0, len(labels)) + for k := range labels { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + s := "" + for i, k := range sortedKeys { + if i > 0 { + s += "," + } + s += k + "=" + strconv.Quote(labels[k]) + } + return s +} + +var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`) + +// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable. +// A list of labels of the form `="",="",...` is accepted. +// Domain names and paths are accepted as label keys. +// Most users will want to use FromEnv instead. 
+func DecodeLabels(s string) (map[string]string, error) {
+	m := map[string]string{}
+	// Ensure a trailing comma, which allows us to keep the regex simpler
+	s = strings.TrimRight(strings.TrimSpace(s), ",") + ","
+
+	for len(s) > 0 {
+		match := labelRegex.FindStringSubmatch(s)
+		if len(match) == 0 {
+			return nil, fmt.Errorf("invalid label formatting, remainder: %s", s)
+		}
+		v := match[2]
+		if v == "" {
+			v = match[3]
+		} else {
+			var err error
+			if v, err = strconv.Unquote(v); err != nil {
+				return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err)
+			}
+		}
+		m[match[1]] = v
+
+		s = s[len(match[0]):]
+	}
+	return m, nil
+}
+
+// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE
+// and OC_RESOURCE_LABELS environment variables.
+func FromEnv(context.Context) (*Resource, error) {
+	res := &Resource{
+		Type: strings.TrimSpace(os.Getenv(EnvVarType)),
+	}
+	labels := strings.TrimSpace(os.Getenv(EnvVarLabels))
+	if labels == "" {
+		return res, nil
+	}
+	var err error
+	if res.Labels, err = DecodeLabels(labels); err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+var _ Detector = FromEnv
+
+// merge resource information from b into a. In case of a collision, a takes precedence.
+func merge(a, b *Resource) *Resource {
+	if a == nil {
+		return b
+	}
+	if b == nil {
+		return a
+	}
+	res := &Resource{
+		Type:   a.Type,
+		Labels: map[string]string{},
+	}
+	if res.Type == "" {
+		res.Type = b.Type
+	}
+	for k, v := range b.Labels {
+		res.Labels[k] = v
+	}
+	// Labels from resource a overwrite labels from resource b.
+	for k, v := range a.Labels {
+		res.Labels[k] = v
+	}
+	return res
+}
+
+// Detector attempts to detect resource information.
+// If the detector cannot find resource information, the returned resource is nil but no
+// error is returned.
+// An error is only returned on unexpected failures.
+type Detector func(context.Context) (*Resource, error)
+
+// MultiDetector returns a Detector that calls all input detectors in order and
+// merges each result with the previous one. In case a type of label key is already set,
+// the first set value takes precedence.
+// It returns on the first error that a sub-detector encounters.
+func MultiDetector(detectors ...Detector) Detector {
+	return func(ctx context.Context) (*Resource, error) {
+		return detectAll(ctx, detectors...)
+	}
+}
+
+// detectAll calls all input detectors sequentially and merges each result with the previous one.
+// It returns on the first error that a sub-detector encounters.
+func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) {
+	var res *Resource
+	for _, d := range detectors {
+		r, err := d(ctx)
+		if err != nil {
+			return nil, err
+		}
+		res = merge(res, r)
+	}
+	return res, nil
+}
diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go
new file mode 100644
index 000000000000..00d473ee0298
--- /dev/null
+++ b/vendor/go.opencensus.io/stats/doc.go
@@ -0,0 +1,69 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+/*
+Package stats contains support for OpenCensus stats recording.
+
+OpenCensus allows users to create typed measures, record measurements,
+aggregate the collected data, and export the aggregated data.
+ +Measures + +A measure represents a type of data point to be tracked and recorded. +For example, latency, request Mb/s, and response Mb/s are measures +to collect from a server. + +Measure constructors such as Int64 and Float64 automatically +register the measure by the given name. Each registered measure needs +to be unique by name. Measures also have a description and a unit. + +Libraries can define and export measures. Application authors can then +create views and collect and break down measures by the tags they are +interested in. + +Recording measurements + +Measurement is a data point to be collected for a measure. For example, +for a latency (ms) measure, 100 is a measurement that represents a 100ms +latency event. Measurements are created from measures with +the current context. Tags from the current context are recorded with the +measurements if they are any. + +Recorded measurements are dropped immediately if no views are registered for them. +There is usually no need to conditionally enable and disable +recording to reduce cost. Recording of measurements is cheap. + +Libraries can always record measurements, and applications can later decide +on which measurements they want to collect by registering views. This allows +libraries to turn on the instrumentation by default. + +Exemplars + +For a given recorded measurement, the associated exemplar is a diagnostic map +that gives more information about the measurement. + +When aggregated using a Distribution aggregation, an exemplar is kept for each +bucket in the Distribution. This allows you to easily find an example of a +measurement that fell into each bucket. + +For example, if you also use the OpenCensus trace package and you +record a measurement with a context that contains a sampled trace span, +then the trace span will be added to the exemplar associated with the measurement. 
+ +When exported to a supporting back end, you should be able to easily navigate +to example traces that fell into each bucket in the Distribution. + +*/ +package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go new file mode 100644 index 000000000000..36935e629b66 --- /dev/null +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "go.opencensus.io/tag" +) + +// DefaultRecorder will be called for each Record call. +var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) + +// SubscriptionReporter reports when a view subscribed with a measure. +var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go new file mode 100644 index 000000000000..1ffd3cefc730 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure.go @@ -0,0 +1,109 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +import ( + "sync" + "sync/atomic" +) + +// Measure represents a single numeric value to be tracked and recorded. +// For example, latency, request bytes, and response bytes could be measures +// to collect from a server. +// +// Measures by themselves have no outside effects. In order to be exported, +// the measure needs to be used in a View. If no Views are defined over a +// measure, there is very little cost in recording it. +type Measure interface { + // Name returns the name of this measure. + // + // Measure names are globally unique (among all libraries linked into your program). + // We recommend prefixing the measure name with a domain name relevant to your + // project or application. + // + // Measure names are never sent over the wire or exported to backends. + // They are only used to create Views. + Name() string + + // Description returns the human-readable description of this measure. + Description() string + + // Unit returns the units for the values this measure takes on. + // + // Units are encoded according to the case-sensitive abbreviations from the + // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html + Unit() string +} + +// measureDescriptor is the untyped descriptor associated with each measure. +// Int64Measure and Float64Measure wrap measureDescriptor to provide typed +// recording APIs. +// Two Measures with the same name will have the same measureDescriptor. 
+type measureDescriptor struct { + subs int32 // access atomically + + name string + description string + unit string +} + +func (m *measureDescriptor) subscribe() { + atomic.StoreInt32(&m.subs, 1) +} + +func (m *measureDescriptor) subscribed() bool { + return atomic.LoadInt32(&m.subs) == 1 +} + +var ( + mu sync.RWMutex + measures = make(map[string]*measureDescriptor) +) + +func registerMeasureHandle(name, desc, unit string) *measureDescriptor { + mu.Lock() + defer mu.Unlock() + + if stored, ok := measures[name]; ok { + return stored + } + m := &measureDescriptor{ + name: name, + description: desc, + unit: unit, + } + measures[name] = m + return m +} + +// Measurement is the numeric value measured when recording stats. Each measure +// provides methods to create measurements of their kind. For example, Int64Measure +// provides M to convert an int64 into a measurement. +type Measurement struct { + v float64 + m Measure + desc *measureDescriptor +} + +// Value returns the value of the Measurement as a float64. +func (m Measurement) Value() float64 { + return m.v +} + +// Measure returns the Measure from which this Measurement was created. +func (m Measurement) Measure() Measure { + return m.m +} diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go new file mode 100644 index 000000000000..f02c1eda845d --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure_float64.go @@ -0,0 +1,55 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Float64Measure is a measure for float64 values. +type Float64Measure struct { + desc *measureDescriptor +} + +// M creates a new float64 measurement. +// Use Record to record measurements. +func (m *Float64Measure) M(v float64) Measurement { + return Measurement{ + m: m, + desc: m.desc, + v: v, + } +} + +// Float64 creates a new measure for float64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. +func Float64(name, description, unit string) *Float64Measure { + mi := registerMeasureHandle(name, description, unit) + return &Float64Measure{mi} +} + +// Name returns the name of the measure. +func (m *Float64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Float64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. +func (m *Float64Measure) Unit() string { + return m.desc.unit +} diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go new file mode 100644 index 000000000000..d101d7973581 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure_int64.go @@ -0,0 +1,55 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package stats + +// Int64Measure is a measure for int64 values. +type Int64Measure struct { + desc *measureDescriptor +} + +// M creates a new int64 measurement. +// Use Record to record measurements. +func (m *Int64Measure) M(v int64) Measurement { + return Measurement{ + m: m, + desc: m.desc, + v: float64(v), + } +} + +// Int64 creates a new measure for int64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. +func Int64(name, description, unit string) *Int64Measure { + mi := registerMeasureHandle(name, description, unit) + return &Int64Measure{mi} +} + +// Name returns the name of the measure. +func (m *Int64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Int64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. +func (m *Int64Measure) Unit() string { + return m.desc.unit +} diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go new file mode 100644 index 000000000000..ad4691184dfc --- /dev/null +++ b/vendor/go.opencensus.io/stats/record.go @@ -0,0 +1,117 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package stats + +import ( + "context" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +func init() { + internal.SubscriptionReporter = func(measure string) { + mu.Lock() + measures[measure].subscribe() + mu.Unlock() + } +} + +type recordOptions struct { + attachments metricdata.Attachments + mutators []tag.Mutator + measurements []Measurement +} + +// WithAttachments applies provided exemplar attachments. +func WithAttachments(attachments metricdata.Attachments) Options { + return func(ro *recordOptions) { + ro.attachments = attachments + } +} + +// WithTags applies provided tag mutators. +func WithTags(mutators ...tag.Mutator) Options { + return func(ro *recordOptions) { + ro.mutators = mutators + } +} + +// WithMeasurements applies provided measurements. +func WithMeasurements(measurements ...Measurement) Options { + return func(ro *recordOptions) { + ro.measurements = measurements + } +} + +// Options apply changes to recordOptions. +type Options func(*recordOptions) + +func createRecordOption(ros ...Options) *recordOptions { + o := &recordOptions{} + for _, ro := range ros { + ro(o) + } + return o +} + +// Record records one or multiple measurements with the same context at once. +// If there are any tags in the context, measurements will be tagged with them. +func Record(ctx context.Context, ms ...Measurement) { + RecordWithOptions(ctx, WithMeasurements(ms...)) +} + +// RecordWithTags records one or multiple measurements at once. +// +// Measurements will be tagged with the tags in the context mutated by the mutators. +// RecordWithTags is useful if you want to record with tag mutations but don't want +// to propagate the mutations in the context. 
+func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { + return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...)) +} + +// RecordWithOptions records measurements from the given options (if any) against context +// and tags and attachments in the options (if any). +// If there are any tags in the context, measurements will be tagged with them. +func RecordWithOptions(ctx context.Context, ros ...Options) error { + o := createRecordOption(ros...) + if len(o.measurements) == 0 { + return nil + } + recorder := internal.DefaultRecorder + if recorder == nil { + return nil + } + record := false + for _, m := range o.measurements { + if m.desc.subscribed() { + record = true + break + } + } + if !record { + return nil + } + if len(o.mutators) > 0 { + var err error + if ctx, err = tag.New(ctx, o.mutators...); err != nil { + return err + } + } + recorder(tag.FromContext(ctx), o.measurements, o.attachments) + return nil +} diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go new file mode 100644 index 000000000000..6931a5f29661 --- /dev/null +++ b/vendor/go.opencensus.io/stats/units.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package stats + +// Units are encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +const ( + UnitNone = "1" // Deprecated: Use UnitDimensionless. + UnitDimensionless = "1" + UnitBytes = "By" + UnitMilliseconds = "ms" +) diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go new file mode 100644 index 000000000000..8bd25314e20e --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -0,0 +1,120 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +// AggType represents the type of aggregation function used on a View. +type AggType int + +// All available aggregation types. +const ( + AggTypeNone AggType = iota // no aggregation; reserved for future use. + AggTypeCount // the count aggregation, see Count. + AggTypeSum // the sum aggregation, see Sum. + AggTypeDistribution // the distribution aggregation, see Distribution. + AggTypeLastValue // the last value aggregation, see LastValue. +) + +func (t AggType) String() string { + return aggTypeName[t] +} + +var aggTypeName = map[AggType]string{ + AggTypeNone: "None", + AggTypeCount: "Count", + AggTypeSum: "Sum", + AggTypeDistribution: "Distribution", + AggTypeLastValue: "LastValue", +} + +// Aggregation represents a data aggregation method. 
Use one of the functions: +// Count, Sum, or Distribution to construct an Aggregation. +type Aggregation struct { + Type AggType // Type is the AggType of this Aggregation. + Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. + + newData func() AggregationData +} + +var ( + aggCount = &Aggregation{ + Type: AggTypeCount, + newData: func() AggregationData { + return &CountData{} + }, + } + aggSum = &Aggregation{ + Type: AggTypeSum, + newData: func() AggregationData { + return &SumData{} + }, + } +) + +// Count indicates that data collected and aggregated +// with this method will be turned into a count value. +// For example, total number of accepted requests can be +// aggregated by using Count. +func Count() *Aggregation { + return aggCount +} + +// Sum indicates that data collected and aggregated +// with this method will be summed up. +// For example, accumulated request bytes can be aggregated by using +// Sum. +func Sum() *Aggregation { + return aggSum +} + +// Distribution indicates that the desired aggregation is +// a histogram distribution. +// +// A distribution aggregation may contain a histogram of the values in the +// population. The bucket boundaries for that histogram are described +// by the bounds. This defines len(bounds)+1 buckets. +// +// If len(bounds) >= 2 then the boundaries for bucket index i are: +// +// [-infinity, bounds[i]) for i = 0 +// [bounds[i-1], bounds[i]) for 0 < i < length +// [bounds[i-1], +infinity) for i = length +// +// If len(bounds) is 0 then there is no histogram associated with the +// distribution. There will be a single bucket with boundaries +// (-infinity, +infinity). +// +// If len(bounds) is 1 then there is no finite buckets, and that single +// element is the common boundary of the overflow and underflow buckets. 
+func Distribution(bounds ...float64) *Aggregation {
+	return &Aggregation{
+		Type:    AggTypeDistribution,
+		Buckets: bounds,
+		newData: func() AggregationData {
+			return newDistributionData(bounds)
+		},
+	}
+}
+
+// LastValue only reports the last value recorded using this
+// aggregation. All other measurements will be dropped.
+func LastValue() *Aggregation {
+	return &Aggregation{
+		Type: AggTypeLastValue,
+		newData: func() AggregationData {
+			return &LastValueData{}
+		},
+	}
+}
diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go
new file mode 100644
index 000000000000..d500e67f7335
--- /dev/null
+++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go
@@ -0,0 +1,293 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package view
+
+import (
+	"math"
+	"time"
+
+	"go.opencensus.io/metric/metricdata"
+)
+
+// AggregationData represents an aggregated value from a collection.
+// They are reported on the view data during exporting.
+// Most users won't directly access aggregation data.
+type AggregationData interface {
+	isAggregationData() bool
+	addSample(v float64, attachments map[string]interface{}, t time.Time)
+	clone() AggregationData
+	equal(other AggregationData) bool
+	toPoint(t metricdata.Type, time time.Time) metricdata.Point
+}
+
+const epsilon = 1e-9
+
+// CountData is the aggregated data for the Count aggregation.
+// A count aggregation processes data and counts the recordings. +// +// Most users won't directly access count data. +type CountData struct { + Value int64 +} + +func (a *CountData) isAggregationData() bool { return true } + +func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) { + a.Value = a.Value + 1 +} + +func (a *CountData) clone() AggregationData { + return &CountData{Value: a.Value} +} + +func (a *CountData) equal(other AggregationData) bool { + a2, ok := other.(*CountData) + if !ok { + return false + } + + return a.Value == a2.Value +} + +func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return metricdata.NewInt64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + +// SumData is the aggregated data for the Sum aggregation. +// A sum aggregation processes data and sums up the recordings. +// +// Most users won't directly access sum data. +type SumData struct { + Value float64 +} + +func (a *SumData) isAggregationData() bool { return true } + +func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { + a.Value += v +} + +func (a *SumData) clone() AggregationData { + return &SumData{Value: a.Value} +} + +func (a *SumData) equal(other AggregationData) bool { + a2, ok := other.(*SumData) + if !ok { + return false + } + return math.Pow(a.Value-a2.Value, 2) < epsilon +} + +func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return metricdata.NewInt64Point(t, int64(a.Value)) + case metricdata.TypeCumulativeFloat64: + return metricdata.NewFloat64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + +// DistributionData is the aggregated data for the +// Distribution aggregation. +// +// Most users won't directly access distribution data. 
+// +// For a distribution with N bounds, the associated DistributionData will have +// N+1 buckets. +type DistributionData struct { + Count int64 // number of data points aggregated + Min float64 // minimum value in the distribution + Max float64 // max value in the distribution + Mean float64 // mean of the distribution + SumOfSquaredDev float64 // sum of the squared deviation from the mean + CountPerBucket []int64 // number of occurrences per bucket + // ExemplarsPerBucket is slice the same length as CountPerBucket containing + // an exemplar for the associated bucket, or nil. + ExemplarsPerBucket []*metricdata.Exemplar + bounds []float64 // histogram distribution of the values +} + +func newDistributionData(bounds []float64) *DistributionData { + bucketCount := len(bounds) + 1 + return &DistributionData{ + CountPerBucket: make([]int64, bucketCount), + ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), + bounds: bounds, + Min: math.MaxFloat64, + Max: math.SmallestNonzeroFloat64, + } +} + +// Sum returns the sum of all samples collected. +func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) } + +func (a *DistributionData) variance() float64 { + if a.Count <= 1 { + return 0 + } + return a.SumOfSquaredDev / float64(a.Count-1) +} + +func (a *DistributionData) isAggregationData() bool { return true } + +// TODO(songy23): support exemplar attachments. 
+func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) { + if v < a.Min { + a.Min = v + } + if v > a.Max { + a.Max = v + } + a.Count++ + a.addToBucket(v, attachments, t) + + if a.Count == 1 { + a.Mean = v + return + } + + oldMean := a.Mean + a.Mean = a.Mean + (v-a.Mean)/float64(a.Count) + a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean) +} + +func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) { + var count *int64 + var i int + var b float64 + for i, b = range a.bounds { + if v < b { + count = &a.CountPerBucket[i] + break + } + } + if count == nil { // Last bucket. + i = len(a.bounds) + count = &a.CountPerBucket[i] + } + *count++ + if exemplar := getExemplar(v, attachments, t); exemplar != nil { + a.ExemplarsPerBucket[i] = exemplar + } +} + +func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar { + if len(attachments) == 0 { + return nil + } + return &metricdata.Exemplar{ + Value: v, + Timestamp: t, + Attachments: attachments, + } +} + +func (a *DistributionData) clone() AggregationData { + c := *a + c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) + c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...) 
+ return &c +} + +func (a *DistributionData) equal(other AggregationData) bool { + a2, ok := other.(*DistributionData) + if !ok { + return false + } + if a2 == nil { + return false + } + if len(a.CountPerBucket) != len(a2.CountPerBucket) { + return false + } + for i := range a.CountPerBucket { + if a.CountPerBucket[i] != a2.CountPerBucket[i] { + return false + } + } + return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon +} + +func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeDistribution: + buckets := []metricdata.Bucket{} + for i := 0; i < len(a.CountPerBucket); i++ { + buckets = append(buckets, metricdata.Bucket{ + Count: a.CountPerBucket[i], + Exemplar: a.ExemplarsPerBucket[i], + }) + } + bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds} + + val := &metricdata.Distribution{ + Count: a.Count, + Sum: a.Sum(), + SumOfSquaredDeviation: a.SumOfSquaredDev, + BucketOptions: bucketOptions, + Buckets: buckets, + } + return metricdata.NewDistributionPoint(t, val) + + default: + // TODO: [rghetia] when we have a use case for TypeGaugeDistribution. + panic("unsupported metricdata.Type") + } +} + +// LastValueData returns the last value recorded for LastValue aggregation. 
+type LastValueData struct { + Value float64 +} + +func (l *LastValueData) isAggregationData() bool { + return true +} + +func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) { + l.Value = v +} + +func (l *LastValueData) clone() AggregationData { + return &LastValueData{l.Value} +} + +func (l *LastValueData) equal(other AggregationData) bool { + a2, ok := other.(*LastValueData) + if !ok { + return false + } + return l.Value == a2.Value +} + +func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeGaugeInt64: + return metricdata.NewInt64Point(t, int64(l.Value)) + case metricdata.TypeGaugeFloat64: + return metricdata.NewFloat64Point(t, l.Value) + default: + panic("unsupported metricdata.Type") + } +} diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go new file mode 100644 index 000000000000..8a6a2c0fdc9f --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -0,0 +1,86 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "sort" + "time" + + "go.opencensus.io/internal/tagencoding" + "go.opencensus.io/tag" +) + +type collector struct { + // signatures holds the aggregations values for each unique tag signature + // (values for all keys) to its aggregator. 
+ signatures map[string]AggregationData + // Aggregation is the description of the aggregation to perform for this + // view. + a *Aggregation +} + +func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) { + aggregator, ok := c.signatures[s] + if !ok { + aggregator = c.a.newData() + c.signatures[s] = aggregator + } + aggregator.addSample(v, attachments, t) +} + +// collectedRows returns a snapshot of the collected Row values. +func (c *collector) collectedRows(keys []tag.Key) []*Row { + rows := make([]*Row, 0, len(c.signatures)) + for sig, aggregator := range c.signatures { + tags := decodeTags([]byte(sig), keys) + row := &Row{Tags: tags, Data: aggregator.clone()} + rows = append(rows, row) + } + return rows +} + +func (c *collector) clearRows() { + c.signatures = make(map[string]AggregationData) +} + +// encodeWithKeys encodes the map by using values +// only associated with the keys provided. +func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { + vb := &tagencoding.Values{ + Buffer: make([]byte, len(keys)), + } + for _, k := range keys { + v, _ := m.Value(k) + vb.WriteValue([]byte(v)) + } + return vb.Bytes() +} + +// decodeTags decodes tags from the buffer and +// orders them by the keys. 
+func decodeTags(buf []byte, keys []tag.Key) []tag.Tag { + vb := &tagencoding.Values{Buffer: buf} + var tags []tag.Tag + for _, k := range keys { + v := vb.ReadValue() + if v != nil { + tags = append(tags, tag.Tag{Key: k, Value: string(v)}) + } + } + vb.ReadIndex = 0 + sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() }) + return tags +} diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go new file mode 100644 index 000000000000..7bbedfe1ff23 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -0,0 +1,47 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package view contains support for collecting and exposing aggregates over stats. +// +// In order to collect measurements, views need to be defined and registered. +// A view allows recorded measurements to be filtered and aggregated. +// +// All recorded measurements can be grouped by a list of tags. +// +// OpenCensus provides several aggregation methods: Count, Distribution and Sum. +// +// Count only counts the number of measurement points recorded. +// Distribution provides statistical summary of the aggregated data by counting +// how many recorded measurements fall into each bucket. +// Sum adds up the measurement values. +// LastValue just keeps track of the most recently recorded measurement value. +// All aggregations are cumulative. 
+// +// Views can be registered and unregistered at any time during program execution. +// +// Libraries can define views but it is recommended that in most cases registering +// views be left up to applications. +// +// Exporting +// +// Collected and aggregated data can be exported to a metric collection +// backend by registering its exporter. +// +// Multiple exporters can be registered to upload the data to various +// different back ends. +package view // import "go.opencensus.io/stats/view" + +// TODO(acetechnologist): Add a link to the language independent OpenCensus +// spec when it is available. diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go new file mode 100644 index 000000000000..7cb59718f5fe --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/export.go @@ -0,0 +1,58 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package view + +import "sync" + +var ( + exportersMu sync.RWMutex // guards exporters + exporters = make(map[Exporter]struct{}) +) + +// Exporter exports the collected records as view data. +// +// The ExportView method should return quickly; if an +// Exporter takes a significant amount of time to +// process a Data, that work should be done on another goroutine. +// +// It is safe to assume that ExportView will not be called concurrently from +// multiple goroutines. +// +// The Data should not be modified. 
+type Exporter interface { + ExportView(viewData *Data) +} + +// RegisterExporter registers an exporter. +// Collected data will be reported via all the +// registered exporters. Once you no longer +// want data to be exported, invoke UnregisterExporter +// with the previously registered exporter. +// +// Binaries can register exporters, libraries shouldn't register exporters. +func RegisterExporter(e Exporter) { + exportersMu.Lock() + defer exportersMu.Unlock() + + exporters[e] = struct{}{} +} + +// UnregisterExporter unregisters an exporter. +func UnregisterExporter(e Exporter) { + exportersMu.Lock() + defer exportersMu.Unlock() + + delete(exporters, e) +} diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go new file mode 100644 index 000000000000..293b54ecbedc --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/view.go @@ -0,0 +1,221 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "sync/atomic" + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats" + "go.opencensus.io/tag" +) + +// View allows users to aggregate the recorded stats.Measurements. +// Views need to be passed to the Register function before data will be +// collected and sent to Exporters. +type View struct { + Name string // Name of View. Must be unique. 
If unset, will default to the name of the Measure. + Description string // Description is a human-readable description for this view. + + // TagKeys are the tag keys describing the grouping of this view. + // A single Row will be produced for each combination of associated tag values. + TagKeys []tag.Key + + // Measure is a stats.Measure to aggregate in this view. + Measure stats.Measure + + // Aggregation is the aggregation function to apply to the set of Measurements. + Aggregation *Aggregation +} + +// WithName returns a copy of the View with a new name. This is useful for +// renaming views to cope with limitations placed on metric names by various +// backends. +func (v *View) WithName(name string) *View { + vNew := *v + vNew.Name = name + return &vNew +} + +// same compares two views and returns true if they represent the same aggregation. +func (v *View) same(other *View) bool { + if v == other { + return true + } + if v == nil { + return false + } + return reflect.DeepEqual(v.Aggregation, other.Aggregation) && + v.Measure.Name() == other.Measure.Name() +} + +// ErrNegativeBucketBounds error returned if histogram contains negative bounds. +// +// Deprecated: this should not be public. 
+var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported") + +// canonicalize canonicalizes v by setting explicit +// defaults for Name and Description and sorting the TagKeys +func (v *View) canonicalize() error { + if v.Measure == nil { + return fmt.Errorf("cannot register view %q: measure not set", v.Name) + } + if v.Aggregation == nil { + return fmt.Errorf("cannot register view %q: aggregation not set", v.Name) + } + if v.Name == "" { + v.Name = v.Measure.Name() + } + if v.Description == "" { + v.Description = v.Measure.Description() + } + if err := checkViewName(v.Name); err != nil { + return err + } + sort.Slice(v.TagKeys, func(i, j int) bool { + return v.TagKeys[i].Name() < v.TagKeys[j].Name() + }) + sort.Float64s(v.Aggregation.Buckets) + for _, b := range v.Aggregation.Buckets { + if b < 0 { + return ErrNegativeBucketBounds + } + } + // drop 0 bucket silently. + v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...) + + return nil +} + +func dropZeroBounds(bounds ...float64) []float64 { + for i, bound := range bounds { + if bound > 0 { + return bounds[i:] + } + } + return []float64{} +} + +// viewInternal is the internal representation of a View. +type viewInternal struct { + view *View // view is the canonicalized View definition associated with this view. + subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access + collector *collector + metricDescriptor *metricdata.Descriptor +} + +func newViewInternal(v *View) (*viewInternal, error) { + return &viewInternal{ + view: v, + collector: &collector{make(map[string]AggregationData), v.Aggregation}, + metricDescriptor: viewToMetricDescriptor(v), + }, nil +} + +func (v *viewInternal) subscribe() { + atomic.StoreUint32(&v.subscribed, 1) +} + +func (v *viewInternal) unsubscribe() { + atomic.StoreUint32(&v.subscribed, 0) +} + +// isSubscribed returns true if the view is exporting +// data by subscription. 
+func (v *viewInternal) isSubscribed() bool { + return atomic.LoadUint32(&v.subscribed) == 1 +} + +func (v *viewInternal) clearRows() { + v.collector.clearRows() +} + +func (v *viewInternal) collectedRows() []*Row { + return v.collector.collectedRows(v.view.TagKeys) +} + +func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) { + if !v.isSubscribed() { + return + } + sig := string(encodeWithKeys(m, v.view.TagKeys)) + v.collector.addSample(sig, val, attachments, t) +} + +// A Data is a set of rows about usage of the single measure associated +// with the given view. Each row is specific to a unique set of tags. +type Data struct { + View *View + Start, End time.Time + Rows []*Row +} + +// Row is the collected value for a specific set of key value pairs a.k.a tags. +type Row struct { + Tags []tag.Tag + Data AggregationData +} + +func (r *Row) String() string { + var buffer bytes.Buffer + buffer.WriteString("{ ") + buffer.WriteString("{ ") + for _, t := range r.Tags { + buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value)) + } + buffer.WriteString(" }") + buffer.WriteString(fmt.Sprintf("%v", r.Data)) + buffer.WriteString(" }") + return buffer.String() +} + +// Equal returns true if both rows are equal. Tags are expected to be ordered +// by the key name. Even if both rows have the same tags but the tags appear in +// different orders it will return false. +func (r *Row) Equal(other *Row) bool { + if r == other { + return true + } + return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data) +} + +const maxNameLength = 255 + +// Returns true if the given string contains only printable characters. 
+func isPrintable(str string) bool { + for _, r := range str { + if !(r >= ' ' && r <= '~') { + return false + } + } + return true +} + +func checkViewName(name string) error { + if len(name) > maxNameLength { + return fmt.Errorf("view name cannot be larger than %v", maxNameLength) + } + if !isPrintable(name) { + return fmt.Errorf("view name needs to be an ASCII string") + } + return nil +} diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go new file mode 100644 index 000000000000..293c1646df21 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/view_to_metric.go @@ -0,0 +1,149 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package view + +import ( + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats" +) + +func getUnit(unit string) metricdata.Unit { + switch unit { + case "1": + return metricdata.UnitDimensionless + case "ms": + return metricdata.UnitMilliseconds + case "By": + return metricdata.UnitBytes + } + return metricdata.UnitDimensionless +} + +func getType(v *View) metricdata.Type { + m := v.Measure + agg := v.Aggregation + + switch agg.Type { + case AggTypeSum: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeDistribution: + return metricdata.TypeCumulativeDistribution + case AggTypeLastValue: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeGaugeInt64 + case *stats.Float64Measure: + return metricdata.TypeGaugeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeCount: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeInt64 + default: + panic("unexpected measure type") + } + default: + panic("unexpected aggregation type") + } +} + +func getLabelKeys(v *View) []metricdata.LabelKey { + labelKeys := []metricdata.LabelKey{} + for _, k := range v.TagKeys { + labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()}) + } + return labelKeys +} + +func viewToMetricDescriptor(v *View) *metricdata.Descriptor { + return &metricdata.Descriptor{ + Name: v.Name, + Description: v.Description, + Unit: convertUnit(v), + Type: getType(v), + LabelKeys: getLabelKeys(v), + } +} + +func convertUnit(v *View) metricdata.Unit { + switch v.Aggregation.Type { + case AggTypeCount: + return metricdata.UnitDimensionless + default: + return getUnit(v.Measure.Unit()) + } +} + +func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) 
[]metricdata.LabelValue { + labelValues := []metricdata.LabelValue{} + tagMap := make(map[string]string) + for _, tag := range row.Tags { + tagMap[tag.Key.Name()] = tag.Value + } + + for _, key := range expectedKeys { + if val, ok := tagMap[key.Key]; ok { + labelValues = append(labelValues, metricdata.NewLabelValue(val)) + } else { + labelValues = append(labelValues, metricdata.LabelValue{}) + } + } + return labelValues +} + +func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Time) *metricdata.TimeSeries { + return &metricdata.TimeSeries{ + Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, + LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys), + StartTime: startTime, + } +} + +func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricdata.Metric { + if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || + v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { + startTime = time.Time{} + } + + rows := v.collectedRows() + if len(rows) == 0 { + return nil + } + + ts := []*metricdata.TimeSeries{} + for _, row := range rows { + ts = append(ts, rowToTimeseries(v, row, now, startTime)) + } + + m := &metricdata.Metric{ + Descriptor: *v.metricDescriptor, + TimeSeries: ts, + } + return m +} diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go new file mode 100644 index 000000000000..2f3c018af0e1 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -0,0 +1,281 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "fmt" + "sync" + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" + "go.opencensus.io/stats" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +func init() { + defaultWorker = newWorker() + go defaultWorker.start() + internal.DefaultRecorder = record +} + +type measureRef struct { + measure string + views map[*viewInternal]struct{} +} + +type worker struct { + measures map[string]*measureRef + views map[string]*viewInternal + startTimes map[*viewInternal]time.Time + + timer *time.Ticker + c chan command + quit, done chan bool + mu sync.RWMutex +} + +var defaultWorker *worker + +var defaultReportingDuration = 10 * time.Second + +// Find returns a registered view associated with this name. +// If no registered view is found, nil is returned. +func Find(name string) (v *View) { + req := &getViewByNameReq{ + name: name, + c: make(chan *getViewByNameResp), + } + defaultWorker.c <- req + resp := <-req.c + return resp.v +} + +// Register begins collecting data for the given views. +// Once a view is registered, it reports data to the registered exporters. +func Register(views ...*View) error { + req := ®isterViewReq{ + views: views, + err: make(chan error), + } + defaultWorker.c <- req + return <-req.err +} + +// Unregister the given views. Data will not longer be exported for these views +// after Unregister returns. +// It is not necessary to unregister from views you expect to collect for the +// duration of your program execution. 
+func Unregister(views ...*View) { + names := make([]string, len(views)) + for i := range views { + names[i] = views[i].Name + } + req := &unregisterFromViewReq{ + views: names, + done: make(chan struct{}), + } + defaultWorker.c <- req + <-req.done +} + +// RetrieveData gets a snapshot of the data collected for the the view registered +// with the given name. It is intended for testing only. +func RetrieveData(viewName string) ([]*Row, error) { + req := &retrieveDataReq{ + now: time.Now(), + v: viewName, + c: make(chan *retrieveDataResp), + } + defaultWorker.c <- req + resp := <-req.c + return resp.rows, resp.err +} + +func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { + req := &recordReq{ + tm: tags, + ms: ms.([]stats.Measurement), + attachments: attachments, + t: time.Now(), + } + defaultWorker.c <- req +} + +// SetReportingPeriod sets the interval between reporting aggregated views in +// the program. If duration is less than or equal to zero, it enables the +// default behavior. +// +// Note: each exporter makes different promises about what the lowest supported +// duration is. For example, the Stackdriver exporter recommends a value no +// lower than 1 minute. Consult each exporter per your needs. +func SetReportingPeriod(d time.Duration) { + // TODO(acetechnologist): ensure that the duration d is more than a certain + // value. e.g. 1s + req := &setReportingPeriodReq{ + d: d, + c: make(chan bool), + } + defaultWorker.c <- req + <-req.c // don't return until the timer is set to the new duration. 
+} + +func newWorker() *worker { + return &worker{ + measures: make(map[string]*measureRef), + views: make(map[string]*viewInternal), + startTimes: make(map[*viewInternal]time.Time), + timer: time.NewTicker(defaultReportingDuration), + c: make(chan command, 1024), + quit: make(chan bool), + done: make(chan bool), + } +} + +func (w *worker) start() { + prodMgr := metricproducer.GlobalManager() + prodMgr.AddProducer(w) + + for { + select { + case cmd := <-w.c: + cmd.handleCommand(w) + case <-w.timer.C: + w.reportUsage(time.Now()) + case <-w.quit: + w.timer.Stop() + close(w.c) + w.done <- true + return + } + } +} + +func (w *worker) stop() { + prodMgr := metricproducer.GlobalManager() + prodMgr.DeleteProducer(w) + + w.quit <- true + <-w.done +} + +func (w *worker) getMeasureRef(name string) *measureRef { + if mr, ok := w.measures[name]; ok { + return mr + } + mr := &measureRef{ + measure: name, + views: make(map[*viewInternal]struct{}), + } + w.measures[name] = mr + return mr +} + +func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { + w.mu.Lock() + defer w.mu.Unlock() + vi, err := newViewInternal(v) + if err != nil { + return nil, err + } + if x, ok := w.views[vi.view.Name]; ok { + if !x.view.same(vi.view) { + return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name) + } + + // the view is already registered so there is nothing to do and the + // command is considered successful. 
+ return x, nil + } + w.views[vi.view.Name] = vi + ref := w.getMeasureRef(vi.view.Measure.Name()) + ref.views[vi] = struct{}{} + return vi, nil +} + +func (w *worker) unregisterView(viewName string) { + w.mu.Lock() + defer w.mu.Unlock() + delete(w.views, viewName) +} + +func (w *worker) reportView(v *viewInternal, now time.Time) { + if !v.isSubscribed() { + return + } + rows := v.collectedRows() + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + viewData := &Data{ + View: v.view, + Start: w.startTimes[v], + End: time.Now(), + Rows: rows, + } + exportersMu.Lock() + for e := range exporters { + e.ExportView(viewData) + } + exportersMu.Unlock() +} + +func (w *worker) reportUsage(now time.Time) { + w.mu.Lock() + defer w.mu.Unlock() + for _, v := range w.views { + w.reportView(v, now) + } +} + +func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { + if !v.isSubscribed() { + return nil + } + + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + + var startTime time.Time + if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || + v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { + startTime = time.Time{} + } else { + startTime = w.startTimes[v] + } + + return viewToMetric(v, now, startTime) +} + +// Read reads all view data and returns them as metrics. +// It is typically invoked by metric reader to export stats in metric format. 
+func (w *worker) Read() []*metricdata.Metric { + w.mu.Lock() + defer w.mu.Unlock() + now := time.Now() + metrics := make([]*metricdata.Metric, 0, len(w.views)) + for _, v := range w.views { + metric := w.toMetric(v, now) + if metric != nil { + metrics = append(metrics, metric) + } + } + return metrics +} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go new file mode 100644 index 000000000000..0267e179aed4 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -0,0 +1,186 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "errors" + "fmt" + "strings" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +type command interface { + handleCommand(w *worker) +} + +// getViewByNameReq is the command to get a view given its name. +type getViewByNameReq struct { + name string + c chan *getViewByNameResp +} + +type getViewByNameResp struct { + v *View +} + +func (cmd *getViewByNameReq) handleCommand(w *worker) { + v := w.views[cmd.name] + if v == nil { + cmd.c <- &getViewByNameResp{nil} + return + } + cmd.c <- &getViewByNameResp{v.view} +} + +// registerViewReq is the command to register a view. 
+type registerViewReq struct { + views []*View + err chan error +} + +func (cmd *registerViewReq) handleCommand(w *worker) { + for _, v := range cmd.views { + if err := v.canonicalize(); err != nil { + cmd.err <- err + return + } + } + var errstr []string + for _, view := range cmd.views { + vi, err := w.tryRegisterView(view) + if err != nil { + errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err)) + continue + } + internal.SubscriptionReporter(view.Measure.Name()) + vi.subscribe() + } + if len(errstr) > 0 { + cmd.err <- errors.New(strings.Join(errstr, "\n")) + } else { + cmd.err <- nil + } +} + +// unregisterFromViewReq is the command to unregister to a view. Has no +// impact on the data collection for client that are pulling data from the +// library. +type unregisterFromViewReq struct { + views []string + done chan struct{} +} + +func (cmd *unregisterFromViewReq) handleCommand(w *worker) { + for _, name := range cmd.views { + vi, ok := w.views[name] + if !ok { + continue + } + + // Report pending data for this view before removing it. + w.reportView(vi, time.Now()) + + vi.unsubscribe() + if !vi.isSubscribed() { + // this was the last subscription and view is not collecting anymore. + // The collected data can be cleared. + vi.clearRows() + } + w.unregisterView(name) + } + cmd.done <- struct{}{} +} + +// retrieveDataReq is the command to retrieve data for a view. 
+type retrieveDataReq struct { + now time.Time + v string + c chan *retrieveDataResp +} + +type retrieveDataResp struct { + rows []*Row + err error +} + +func (cmd *retrieveDataReq) handleCommand(w *worker) { + w.mu.Lock() + defer w.mu.Unlock() + vi, ok := w.views[cmd.v] + if !ok { + cmd.c <- &retrieveDataResp{ + nil, + fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v), + } + return + } + + if !vi.isSubscribed() { + cmd.c <- &retrieveDataResp{ + nil, + fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v), + } + return + } + cmd.c <- &retrieveDataResp{ + vi.collectedRows(), + nil, + } +} + +// recordReq is the command to record data related to multiple measures +// at once. +type recordReq struct { + tm *tag.Map + ms []stats.Measurement + attachments map[string]interface{} + t time.Time +} + +func (cmd *recordReq) handleCommand(w *worker) { + w.mu.Lock() + defer w.mu.Unlock() + for _, m := range cmd.ms { + if (m == stats.Measurement{}) { // not registered + continue + } + ref := w.getMeasureRef(m.Measure().Name()) + for v := range ref.views { + v.addSample(cmd.tm, m.Value(), cmd.attachments, time.Now()) + } + } +} + +// setReportingPeriodReq is the command to modify the duration between +// reporting the collected data to the registered clients. 
+type setReportingPeriodReq struct { + d time.Duration + c chan bool +} + +func (cmd *setReportingPeriodReq) handleCommand(w *worker) { + w.timer.Stop() + if cmd.d <= 0 { + w.timer = time.NewTicker(defaultReportingDuration) + } else { + w.timer = time.NewTicker(cmd.d) + } + cmd.c <- true +} diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go new file mode 100644 index 000000000000..b27d1b26b132 --- /dev/null +++ b/vendor/go.opencensus.io/tag/context.go @@ -0,0 +1,43 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "context" +) + +// FromContext returns the tag map stored in the context. +func FromContext(ctx context.Context) *Map { + // The returned tag map shouldn't be mutated. + ts := ctx.Value(mapCtxKey) + if ts == nil { + return nil + } + return ts.(*Map) +} + +// NewContext creates a new context with the given tag map. +// To propagate a tag map to downstream methods and downstream RPCs, add a tag map +// to the current context. NewContext will return a copy of the current context, +// and put the tag map into the returned one. +// If there is already a tag map in the current context, it will be replaced with m. 
+func NewContext(ctx context.Context, m *Map) context.Context { + return context.WithValue(ctx, mapCtxKey, m) +} + +type ctxKey struct{} + +var mapCtxKey = ctxKey{} diff --git a/vendor/go.opencensus.io/tag/doc.go b/vendor/go.opencensus.io/tag/doc.go new file mode 100644 index 000000000000..da16b74e4deb --- /dev/null +++ b/vendor/go.opencensus.io/tag/doc.go @@ -0,0 +1,26 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/* +Package tag contains OpenCensus tags. + +Tags are key-value pairs. Tags provide additional cardinality to +the OpenCensus instrumentation data. + +Tags can be propagated on the wire and in the same +process via context.Context. Encode and Decode should be +used to represent tags into their binary propagation form. +*/ +package tag // import "go.opencensus.io/tag" diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go new file mode 100644 index 000000000000..71ec913657b7 --- /dev/null +++ b/vendor/go.opencensus.io/tag/key.go @@ -0,0 +1,44 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +// Key represents a tag key. +type Key struct { + name string +} + +// NewKey creates or retrieves a string key identified by name. +// Calling NewKey more than once with the same name returns the same key. +func NewKey(name string) (Key, error) { + if !checkKeyName(name) { + return Key{}, errInvalidKeyName + } + return Key{name: name}, nil +} + +// MustNewKey returns a key with the given name, and panics if name is an invalid key name. +func MustNewKey(name string) Key { + k, err := NewKey(name) + if err != nil { + panic(err) + } + return k +} + +// Name returns the name of the key. +func (k Key) Name() string { + return k.name +} diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go new file mode 100644 index 000000000000..0272ef85a4cc --- /dev/null +++ b/vendor/go.opencensus.io/tag/map.go @@ -0,0 +1,229 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package tag + +import ( + "bytes" + "context" + "fmt" + "sort" +) + +// Tag is a key value pair that can be propagated on wire. +type Tag struct { + Key Key + Value string +} + +type tagContent struct { + value string + m metadatas +} + +// Map is a map of tags. Use New to create a context containing +// a new Map. +type Map struct { + m map[Key]tagContent +} + +// Value returns the value for the key if a value for the key exists. +func (m *Map) Value(k Key) (string, bool) { + if m == nil { + return "", false + } + v, ok := m.m[k] + return v.value, ok +} + +func (m *Map) String() string { + if m == nil { + return "nil" + } + keys := make([]Key, 0, len(m.m)) + for k := range m.m { + keys = append(keys, k) + } + sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() }) + + var buffer bytes.Buffer + buffer.WriteString("{ ") + for _, k := range keys { + buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k])) + } + buffer.WriteString(" }") + return buffer.String() +} + +func (m *Map) insert(k Key, v string, md metadatas) { + if _, ok := m.m[k]; ok { + return + } + m.m[k] = tagContent{value: v, m: md} +} + +func (m *Map) update(k Key, v string, md metadatas) { + if _, ok := m.m[k]; ok { + m.m[k] = tagContent{value: v, m: md} + } +} + +func (m *Map) upsert(k Key, v string, md metadatas) { + m.m[k] = tagContent{value: v, m: md} +} + +func (m *Map) delete(k Key) { + delete(m.m, k) +} + +func newMap() *Map { + return &Map{m: make(map[Key]tagContent)} +} + +// Mutator modifies a tag map. +type Mutator interface { + Mutate(t *Map) (*Map, error) +} + +// Insert returns a mutator that inserts a +// value associated with k. If k already exists in the tag map, +// mutator doesn't update the value. +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. 
+func Insert(k Key, v string, mds ...Metadata) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.insert(k, v, createMetadatas(mds...)) + return m, nil + }, + } +} + +// Update returns a mutator that updates the +// value of the tag associated with k with v. If k doesn't +// exists in the tag map, the mutator doesn't insert the value. +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. +func Update(k Key, v string, mds ...Metadata) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.update(k, v, createMetadatas(mds...)) + return m, nil + }, + } +} + +// Upsert returns a mutator that upserts the +// value of the tag associated with k with v. It inserts the +// value if k doesn't exist already. It mutates the value +// if k already exists. +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. +func Upsert(k Key, v string, mds ...Metadata) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.upsert(k, v, createMetadatas(mds...)) + return m, nil + }, + } +} + +func createMetadatas(mds ...Metadata) metadatas { + var metas metadatas + if len(mds) > 0 { + for _, md := range mds { + if md != nil { + md(&metas) + } + } + } else { + WithTTL(TTLUnlimitedPropagation)(&metas) + } + return metas + +} + +// Delete returns a mutator that deletes +// the value associated with k. 
+func Delete(k Key) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + m.delete(k) + return m, nil + }, + } +} + +// New returns a new context that contains a tag map +// originated from the incoming context and modified +// with the provided mutators. +func New(ctx context.Context, mutator ...Mutator) (context.Context, error) { + m := newMap() + orig := FromContext(ctx) + if orig != nil { + for k, v := range orig.m { + if !checkKeyName(k.Name()) { + return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName) + } + if !checkValue(v.value) { + return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue) + } + m.insert(k, v.value, v.m) + } + } + var err error + for _, mod := range mutator { + m, err = mod.Mutate(m) + if err != nil { + return ctx, err + } + } + return NewContext(ctx, m), nil +} + +// Do is similar to pprof.Do: a convenience for installing the tags +// from the context as Go profiler labels. This allows you to +// correlated runtime profiling with stats. +// +// It converts the key/values from the given map to Go profiler labels +// and calls pprof.Do. +// +// Do is going to do nothing if your Go version is below 1.9. +func Do(ctx context.Context, f func(ctx context.Context)) { + do(ctx, f) +} + +type mutator struct { + fn func(t *Map) (*Map, error) +} + +func (m *mutator) Mutate(t *Map) (*Map, error) { + return m.fn(t) +} diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go new file mode 100644 index 000000000000..c242e695c8c7 --- /dev/null +++ b/vendor/go.opencensus.io/tag/map_codec.go @@ -0,0 +1,239 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "encoding/binary" + "fmt" +) + +// KeyType defines the types of keys allowed. Currently only keyTypeString is +// supported. +type keyType byte + +const ( + keyTypeString keyType = iota + keyTypeInt64 + keyTypeTrue + keyTypeFalse + + tagsVersionID = byte(0) +) + +type encoderGRPC struct { + buf []byte + writeIdx, readIdx int +} + +// writeKeyString writes the fieldID '0' followed by the key string and value +// string. +func (eg *encoderGRPC) writeTagString(k, v string) { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k) + eg.writeStringWithVarintLen(v) +} + +func (eg *encoderGRPC) writeTagUint64(k string, i uint64) { + eg.writeByte(byte(keyTypeInt64)) + eg.writeStringWithVarintLen(k) + eg.writeUint64(i) +} + +func (eg *encoderGRPC) writeTagTrue(k string) { + eg.writeByte(byte(keyTypeTrue)) + eg.writeStringWithVarintLen(k) +} + +func (eg *encoderGRPC) writeTagFalse(k string) { + eg.writeByte(byte(keyTypeFalse)) + eg.writeStringWithVarintLen(k) +} + +func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) { + length := len(bytes) + + eg.growIfRequired(binary.MaxVarintLen64 + length) + eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) + copy(eg.buf[eg.writeIdx:], bytes) + eg.writeIdx += length +} + +func (eg *encoderGRPC) writeStringWithVarintLen(s string) { + length := len(s) + + eg.growIfRequired(binary.MaxVarintLen64 + length) + eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) + copy(eg.buf[eg.writeIdx:], s) + eg.writeIdx += length +} + +func (eg 
*encoderGRPC) writeByte(v byte) { + eg.growIfRequired(1) + eg.buf[eg.writeIdx] = v + eg.writeIdx++ +} + +func (eg *encoderGRPC) writeUint32(i uint32) { + eg.growIfRequired(4) + binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i) + eg.writeIdx += 4 +} + +func (eg *encoderGRPC) writeUint64(i uint64) { + eg.growIfRequired(8) + binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i) + eg.writeIdx += 8 +} + +func (eg *encoderGRPC) readByte() byte { + b := eg.buf[eg.readIdx] + eg.readIdx++ + return b +} + +func (eg *encoderGRPC) readUint32() uint32 { + i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:]) + eg.readIdx += 4 + return i +} + +func (eg *encoderGRPC) readUint64() uint64 { + i := binary.LittleEndian.Uint64(eg.buf[eg.readIdx:]) + eg.readIdx += 8 + return i +} + +func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) { + if eg.readEnded() { + return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) + } + length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:]) + if valueStart <= 0 { + return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) + } + + valueStart += eg.readIdx + valueEnd := valueStart + int(length) + if valueEnd > len(eg.buf) { + return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf)) + } + + eg.readIdx = valueEnd + return eg.buf[valueStart:valueEnd], nil +} + +func (eg *encoderGRPC) readStringWithVarintLen() (string, error) { + bytes, err := eg.readBytesWithVarintLen() + if err != nil { + return "", err + } + return string(bytes), nil +} + +func (eg *encoderGRPC) growIfRequired(expected int) { + if len(eg.buf)-eg.writeIdx < expected { + tmp := make([]byte, 2*(len(eg.buf)+1)+expected) + copy(tmp, eg.buf) + eg.buf = tmp + } +} + +func (eg *encoderGRPC) readEnded() bool { + return eg.readIdx >= len(eg.buf) +} + +func (eg *encoderGRPC) bytes() []byte { + return 
eg.buf[:eg.writeIdx] +} + +// Encode encodes the tag map into a []byte. It is useful to propagate +// the tag maps on wire in binary format. +func Encode(m *Map) []byte { + if m == nil { + return nil + } + eg := &encoderGRPC{ + buf: make([]byte, len(m.m)), + } + eg.writeByte(tagsVersionID) + for k, v := range m.m { + if v.m.ttl.ttl == valueTTLUnlimitedPropagation { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k.name) + eg.writeBytesWithVarintLen([]byte(v.value)) + } + } + return eg.bytes() +} + +// Decode decodes the given []byte into a tag map. +func Decode(bytes []byte) (*Map, error) { + ts := newMap() + err := DecodeEach(bytes, ts.upsert) + if err != nil { + // no partial failures + return nil, err + } + return ts, nil +} + +// DecodeEach decodes the given serialized tag map, calling handler for each +// tag key and value decoded. +func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error { + eg := &encoderGRPC{ + buf: bytes, + } + if len(eg.buf) == 0 { + return nil + } + + version := eg.readByte() + if version > tagsVersionID { + return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID) + } + + for !eg.readEnded() { + typ := keyType(eg.readByte()) + + if typ != keyTypeString { + return fmt.Errorf("cannot decode: invalid key type: %q", typ) + } + + k, err := eg.readBytesWithVarintLen() + if err != nil { + return err + } + + v, err := eg.readBytesWithVarintLen() + if err != nil { + return err + } + + key, err := NewKey(string(k)) + if err != nil { + return err + } + val := string(v) + if !checkValue(val) { + return errInvalidValue + } + fn(key, val, createMetadatas(WithTTL(TTLUnlimitedPropagation))) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opencensus.io/tag/metadata.go b/vendor/go.opencensus.io/tag/metadata.go new file mode 100644 index 000000000000..6571a583ea6c --- /dev/null +++ b/vendor/go.opencensus.io/tag/metadata.go @@ -0,0 
+1,52 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +const ( + // valueTTLNoPropagation prevents tag from propagating. + valueTTLNoPropagation = 0 + + // valueTTLUnlimitedPropagation allows tag to propagate without any limits on number of hops. + valueTTLUnlimitedPropagation = -1 +) + +// TTL is metadata that specifies number of hops a tag can propagate. +// Details about TTL metadata is specified at https://github.com/census-instrumentation/opencensus-specs/blob/master/tags/TagMap.md#tagmetadata +type TTL struct { + ttl int +} + +var ( + // TTLUnlimitedPropagation is TTL metadata that allows tag to propagate without any limits on number of hops. + TTLUnlimitedPropagation = TTL{ttl: valueTTLUnlimitedPropagation} + + // TTLNoPropagation is TTL metadata that prevents tag from propagating. + TTLNoPropagation = TTL{ttl: valueTTLNoPropagation} +) + +type metadatas struct { + ttl TTL +} + +// Metadata applies metadatas specified by the function. +type Metadata func(*metadatas) + +// WithTTL applies metadata with provided ttl. 
+func WithTTL(ttl TTL) Metadata { + return func(m *metadatas) { + m.ttl = ttl + } +} diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go new file mode 100644 index 000000000000..b34d95e34a2c --- /dev/null +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -0,0 +1,31 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.9 + +package tag + +import ( + "context" + "runtime/pprof" +) + +func do(ctx context.Context, f func(ctx context.Context)) { + m := FromContext(ctx) + keyvals := make([]string, 0, 2*len(m.m)) + for k, v := range m.m { + keyvals = append(keyvals, k.Name(), v.value) + } + pprof.Do(ctx, pprof.Labels(keyvals...), f) +} diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go new file mode 100644 index 000000000000..83adbce56b72 --- /dev/null +++ b/vendor/go.opencensus.io/tag/profile_not19.go @@ -0,0 +1,23 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.9 + +package tag + +import "context" + +func do(ctx context.Context, f func(ctx context.Context)) { + f(ctx) +} diff --git a/vendor/go.opencensus.io/tag/validate.go b/vendor/go.opencensus.io/tag/validate.go new file mode 100644 index 000000000000..0939fc67483a --- /dev/null +++ b/vendor/go.opencensus.io/tag/validate.go @@ -0,0 +1,56 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tag + +import "errors" + +const ( + maxKeyLength = 255 + + // valid are restricted to US-ASCII subset (range 0x20 (' ') to 0x7e ('~')). 
+ validKeyValueMin = 32 + validKeyValueMax = 126 +) + +var ( + errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length must be 255 characters") + errInvalidValue = errors.New("invalid value: only ASCII characters accepted; max length must be 255 characters") +) + +func checkKeyName(name string) bool { + if len(name) == 0 { + return false + } + if len(name) > maxKeyLength { + return false + } + return isASCII(name) +} + +func isASCII(s string) bool { + for _, c := range s { + if (c < validKeyValueMin) || (c > validKeyValueMax) { + return false + } + } + return true +} + +func checkValue(v string) bool { + if len(v) > maxKeyLength { + return false + } + return isASCII(v) +} diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go new file mode 100644 index 000000000000..0c54492a2b1d --- /dev/null +++ b/vendor/go.opencensus.io/trace/basetypes.go @@ -0,0 +1,119 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "fmt" + "time" +) + +type ( + // TraceID is a 16-byte identifier for a set of spans. + TraceID [16]byte + + // SpanID is an 8-byte identifier for a single span. 
+ SpanID [8]byte +) + +func (t TraceID) String() string { + return fmt.Sprintf("%02x", t[:]) +} + +func (s SpanID) String() string { + return fmt.Sprintf("%02x", s[:]) +} + +// Annotation represents a text annotation with a set of attributes and a timestamp. +type Annotation struct { + Time time.Time + Message string + Attributes map[string]interface{} +} + +// Attribute represents a key-value pair on a span, link or annotation. +// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute. +type Attribute struct { + key string + value interface{} +} + +// BoolAttribute returns a bool-valued attribute. +func BoolAttribute(key string, value bool) Attribute { + return Attribute{key: key, value: value} +} + +// Int64Attribute returns an int64-valued attribute. +func Int64Attribute(key string, value int64) Attribute { + return Attribute{key: key, value: value} +} + +// Float64Attribute returns a float64-valued attribute. +func Float64Attribute(key string, value float64) Attribute { + return Attribute{key: key, value: value} +} + +// StringAttribute returns a string-valued attribute. +func StringAttribute(key string, value string) Attribute { + return Attribute{key: key, value: value} +} + +// LinkType specifies the relationship between the span that had the link +// added, and the linked span. +type LinkType int32 + +// LinkType values. +const ( + LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown. + LinkTypeChild // The linked span is a child of the current span. + LinkTypeParent // The linked span is the parent of the current span. +) + +// Link represents a reference from one span to another span. +type Link struct { + TraceID TraceID + SpanID SpanID + Type LinkType + // Attributes is a set of attributes on the link. + Attributes map[string]interface{} +} + +// MessageEventType specifies the type of message event. +type MessageEventType int32 + +// MessageEventType values. 
+const ( + MessageEventTypeUnspecified MessageEventType = iota // Unknown event type. + MessageEventTypeSent // Indicates a sent RPC message. + MessageEventTypeRecv // Indicates a received RPC message. +) + +// MessageEvent represents an event describing a message sent or received on the network. +type MessageEvent struct { + Time time.Time + EventType MessageEventType + MessageID int64 + UncompressedByteSize int64 + CompressedByteSize int64 +} + +// Status is the status of a Span. +type Status struct { + // Code is a status code. Zero indicates success. + // + // If Code will be propagated to Google APIs, it ideally should be a value from + // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto . + Code int32 + Message string +} diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go new file mode 100644 index 000000000000..775f8274faae --- /dev/null +++ b/vendor/go.opencensus.io/trace/config.go @@ -0,0 +1,86 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + + "go.opencensus.io/trace/internal" +) + +// Config represents the global tracing configuration. +type Config struct { + // DefaultSampler is the default sampler used when creating new spans. + DefaultSampler Sampler + + // IDGenerator is for internal use only. 
+ IDGenerator internal.IDGenerator + + // MaxAnnotationEventsPerSpan is max number of annotation events per span + MaxAnnotationEventsPerSpan int + + // MaxMessageEventsPerSpan is max number of message events per span + MaxMessageEventsPerSpan int + + // MaxAnnotationEventsPerSpan is max number of attributes per span + MaxAttributesPerSpan int + + // MaxLinksPerSpan is max number of links per span + MaxLinksPerSpan int +} + +var configWriteMu sync.Mutex + +const ( + // DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span + DefaultMaxAnnotationEventsPerSpan = 32 + + // DefaultMaxMessageEventsPerSpan is default max number of message events per span + DefaultMaxMessageEventsPerSpan = 128 + + // DefaultMaxAttributesPerSpan is default max number of attributes per span + DefaultMaxAttributesPerSpan = 32 + + // DefaultMaxLinksPerSpan is default max number of links per span + DefaultMaxLinksPerSpan = 32 +) + +// ApplyConfig applies changes to the global tracing configuration. +// +// Fields not provided in the given config are going to be preserved. 
+func ApplyConfig(cfg Config) { + configWriteMu.Lock() + defer configWriteMu.Unlock() + c := *config.Load().(*Config) + if cfg.DefaultSampler != nil { + c.DefaultSampler = cfg.DefaultSampler + } + if cfg.IDGenerator != nil { + c.IDGenerator = cfg.IDGenerator + } + if cfg.MaxAnnotationEventsPerSpan > 0 { + c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan + } + if cfg.MaxMessageEventsPerSpan > 0 { + c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan + } + if cfg.MaxAttributesPerSpan > 0 { + c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan + } + if cfg.MaxLinksPerSpan > 0 { + c.MaxLinksPerSpan = cfg.MaxLinksPerSpan + } + config.Store(&c) +} diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go new file mode 100644 index 000000000000..04b1ee4f38ea --- /dev/null +++ b/vendor/go.opencensus.io/trace/doc.go @@ -0,0 +1,53 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package trace contains support for OpenCensus distributed tracing. + +The following assumes a basic familiarity with OpenCensus concepts. +See http://opencensus.io + + +Exporting Traces + +To export collected tracing data, register at least one exporter. You can use +one of the provided exporters or write your own. + + trace.RegisterExporter(exporter) + +By default, traces will be sampled relatively rarely. To change the sampling +frequency for your entire program, call ApplyConfig. 
Use a ProbabilitySampler +to sample a subset of traces, or use AlwaysSample to collect a trace on every run: + + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + +Be careful about using trace.AlwaysSample in a production application with +significant traffic: a new trace will be started and exported for every request. + +Adding Spans to a Trace + +A trace consists of a tree of spans. In Go, the current span is carried in a +context.Context. + +It is common to want to capture all the activity of a function call in a span. For +this to work, the function must take a context.Context as a parameter. Add these two +lines to the top of the function: + + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() + +StartSpan will create a new top-level span if the context +doesn't contain another span, otherwise it will create a child span. +*/ +package trace // import "go.opencensus.io/trace" diff --git a/vendor/go.opencensus.io/trace/evictedqueue.go b/vendor/go.opencensus.io/trace/evictedqueue.go new file mode 100644 index 000000000000..ffc264f23d2d --- /dev/null +++ b/vendor/go.opencensus.io/trace/evictedqueue.go @@ -0,0 +1,38 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace + +type evictedQueue struct { + queue []interface{} + capacity int + droppedCount int +} + +func newEvictedQueue(capacity int) *evictedQueue { + eq := &evictedQueue{ + capacity: capacity, + queue: make([]interface{}, 0), + } + + return eq +} + +func (eq *evictedQueue) add(value interface{}) { + if len(eq.queue) == eq.capacity { + eq.queue = eq.queue[1:] + eq.droppedCount++ + } + eq.queue = append(eq.queue, value) +} diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go new file mode 100644 index 000000000000..e0d9a4b99e96 --- /dev/null +++ b/vendor/go.opencensus.io/trace/export.go @@ -0,0 +1,97 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + "sync/atomic" + "time" +) + +// Exporter is a type for functions that receive sampled trace spans. +// +// The ExportSpan method should be safe for concurrent use and should return +// quickly; if an Exporter takes a significant amount of time to process a +// SpanData, that work should be done on another goroutine. +// +// The SpanData should not be modified, but a pointer to it can be kept. +type Exporter interface { + ExportSpan(s *SpanData) +} + +type exportersMap map[Exporter]struct{} + +var ( + exporterMu sync.Mutex + exporters atomic.Value +) + +// RegisterExporter adds to the list of Exporters that will receive sampled +// trace spans. 
+// +// Binaries can register exporters, libraries shouldn't register exporters. +func RegisterExporter(e Exporter) { + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } + } + new[e] = struct{}{} + exporters.Store(new) + exporterMu.Unlock() +} + +// UnregisterExporter removes from the list of Exporters the Exporter that was +// registered with the given name. +func UnregisterExporter(e Exporter) { + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } + } + delete(new, e) + exporters.Store(new) + exporterMu.Unlock() +} + +// SpanData contains all the information collected by a Span. +type SpanData struct { + SpanContext + ParentSpanID SpanID + SpanKind int + Name string + StartTime time.Time + // The wall clock time of EndTime will be adjusted to always be offset + // from StartTime by the duration of the span. + EndTime time.Time + // The values of Attributes each have type string, bool, or int64. + Attributes map[string]interface{} + Annotations []Annotation + MessageEvents []MessageEvent + Status + Links []Link + HasRemoteParent bool + DroppedAttributeCount int + DroppedAnnotationCount int + DroppedMessageEventCount int + DroppedLinkCount int + + // ChildSpanCount holds the number of child span created for this span. + ChildSpanCount int +} diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go new file mode 100644 index 000000000000..7e808d8f30e6 --- /dev/null +++ b/vendor/go.opencensus.io/trace/internal/internal.go @@ -0,0 +1,22 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides trace internals. +package internal + +// IDGenerator allows custom generators for TraceId and SpanId. +type IDGenerator interface { + NewTraceID() [16]byte + NewSpanID() [8]byte +} diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go new file mode 100644 index 000000000000..dc7a295c773b --- /dev/null +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -0,0 +1,61 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "github.com/golang/groupcache/lru" +) + +// A simple lru.Cache wrapper that tracks the keys of the current contents and +// the cumulative number of evicted items. 
+type lruMap struct { + cacheKeys map[lru.Key]bool + cache *lru.Cache + droppedCount int +} + +func newLruMap(size int) *lruMap { + lm := &lruMap{ + cacheKeys: make(map[lru.Key]bool), + cache: lru.New(size), + droppedCount: 0, + } + lm.cache.OnEvicted = func(key lru.Key, value interface{}) { + delete(lm.cacheKeys, key) + lm.droppedCount++ + } + return lm +} + +func (lm lruMap) len() int { + return lm.cache.Len() +} + +func (lm lruMap) keys() []interface{} { + keys := []interface{}{} + for k := range lm.cacheKeys { + keys = append(keys, k) + } + return keys +} + +func (lm *lruMap) add(key, value interface{}) { + lm.cacheKeys[lru.Key(key)] = true + lm.cache.Add(lru.Key(key), value) +} + +func (lm *lruMap) get(key interface{}) (interface{}, bool) { + return lm.cache.Get(key) +} diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go new file mode 100644 index 000000000000..71c10f9e3b42 --- /dev/null +++ b/vendor/go.opencensus.io/trace/sampling.go @@ -0,0 +1,75 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "encoding/binary" +) + +const defaultSamplingProbability = 1e-4 + +// Sampler decides whether a trace should be sampled and exported. +type Sampler func(SamplingParameters) SamplingDecision + +// SamplingParameters contains the values passed to a Sampler. 
+type SamplingParameters struct { + ParentContext SpanContext + TraceID TraceID + SpanID SpanID + Name string + HasRemoteParent bool +} + +// SamplingDecision is the value returned by a Sampler. +type SamplingDecision struct { + Sample bool +} + +// ProbabilitySampler returns a Sampler that samples a given fraction of traces. +// +// It also samples spans whose parents are sampled. +func ProbabilitySampler(fraction float64) Sampler { + if !(fraction >= 0) { + fraction = 0 + } else if fraction >= 1 { + return AlwaysSample() + } + + traceIDUpperBound := uint64(fraction * (1 << 63)) + return Sampler(func(p SamplingParameters) SamplingDecision { + if p.ParentContext.IsSampled() { + return SamplingDecision{Sample: true} + } + x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 + return SamplingDecision{Sample: x < traceIDUpperBound} + }) +} + +// AlwaysSample returns a Sampler that samples every trace. +// Be careful about using this sampler in a production application with +// significant traffic: a new trace will be started and exported for every +// request. +func AlwaysSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: true} + } +} + +// NeverSample returns a Sampler that samples no traces. +func NeverSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: false} + } +} diff --git a/vendor/go.opencensus.io/trace/spanbucket.go b/vendor/go.opencensus.io/trace/spanbucket.go new file mode 100644 index 000000000000..fbabad34c000 --- /dev/null +++ b/vendor/go.opencensus.io/trace/spanbucket.go @@ -0,0 +1,130 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "time" +) + +// samplePeriod is the minimum time between accepting spans in a single bucket. +const samplePeriod = time.Second + +// defaultLatencies contains the default latency bucket bounds. +// TODO: consider defaults, make configurable +var defaultLatencies = [...]time.Duration{ + 10 * time.Microsecond, + 100 * time.Microsecond, + time.Millisecond, + 10 * time.Millisecond, + 100 * time.Millisecond, + time.Second, + 10 * time.Second, + time.Minute, +} + +// bucket is a container for a set of spans for a particular error code or latency range. +type bucket struct { + nextTime time.Time // next time we can accept a span + buffer []*SpanData // circular buffer of spans + nextIndex int // location next SpanData should be placed in buffer + overflow bool // whether the circular buffer has wrapped around +} + +func makeBucket(bufferSize int) bucket { + return bucket{ + buffer: make([]*SpanData, bufferSize), + } +} + +// add adds a span to the bucket, if nextTime has been reached. +func (b *bucket) add(s *SpanData) { + if s.EndTime.Before(b.nextTime) { + return + } + if len(b.buffer) == 0 { + return + } + b.nextTime = s.EndTime.Add(samplePeriod) + b.buffer[b.nextIndex] = s + b.nextIndex++ + if b.nextIndex == len(b.buffer) { + b.nextIndex = 0 + b.overflow = true + } +} + +// size returns the number of spans in the bucket. +func (b *bucket) size() int { + if b.overflow { + return len(b.buffer) + } + return b.nextIndex +} + +// span returns the ith span in the bucket. 
+func (b *bucket) span(i int) *SpanData { + if !b.overflow { + return b.buffer[i] + } + if i < len(b.buffer)-b.nextIndex { + return b.buffer[b.nextIndex+i] + } + return b.buffer[b.nextIndex+i-len(b.buffer)] +} + +// resize changes the size of the bucket to n, keeping up to n existing spans. +func (b *bucket) resize(n int) { + cur := b.size() + newBuffer := make([]*SpanData, n) + if cur < n { + for i := 0; i < cur; i++ { + newBuffer[i] = b.span(i) + } + b.buffer = newBuffer + b.nextIndex = cur + b.overflow = false + return + } + for i := 0; i < n; i++ { + newBuffer[i] = b.span(i + cur - n) + } + b.buffer = newBuffer + b.nextIndex = 0 + b.overflow = true +} + +// latencyBucket returns the appropriate bucket number for a given latency. +func latencyBucket(latency time.Duration) int { + i := 0 + for i < len(defaultLatencies) && latency >= defaultLatencies[i] { + i++ + } + return i +} + +// latencyBucketBounds returns the lower and upper bounds for a latency bucket +// number. +// +// The lower bound is inclusive, the upper bound is exclusive (except for the +// last bucket.) +func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) { + if index == 0 { + return 0, defaultLatencies[index] + } + if index == len(defaultLatencies) { + return defaultLatencies[index-1], 1<<63 - 1 + } + return defaultLatencies[index-1], defaultLatencies[index] +} diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go new file mode 100644 index 000000000000..c442d990218a --- /dev/null +++ b/vendor/go.opencensus.io/trace/spanstore.go @@ -0,0 +1,306 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + "time" + + "go.opencensus.io/internal" +) + +const ( + maxBucketSize = 100000 + defaultBucketSize = 10 +) + +var ( + ssmu sync.RWMutex // protects spanStores + spanStores = make(map[string]*spanStore) +) + +// This exists purely to avoid exposing internal methods used by z-Pages externally. +type internalOnly struct{} + +func init() { + //TODO(#412): remove + internal.Trace = &internalOnly{} +} + +// ReportActiveSpans returns the active spans for the given name. +func (i internalOnly) ReportActiveSpans(name string) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for span := range s.active { + out = append(out, span.makeSpanData()) + } + return out +} + +// ReportSpansByError returns a sample of error spans. +// +// If code is nonzero, only spans with that status code are returned. +func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + if code != 0 { + if b, ok := s.errors[code]; ok { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } else { + for _, b := range s.errors { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } + return out +} + +// ConfigureBucketSizes sets the number of spans to keep per latency and error +// bucket for different span names. 
+func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) { + for _, bc := range bcs { + latencyBucketSize := bc.MaxRequestsSucceeded + if latencyBucketSize < 0 { + latencyBucketSize = 0 + } + if latencyBucketSize > maxBucketSize { + latencyBucketSize = maxBucketSize + } + errorBucketSize := bc.MaxRequestsErrors + if errorBucketSize < 0 { + errorBucketSize = 0 + } + if errorBucketSize > maxBucketSize { + errorBucketSize = maxBucketSize + } + spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize) + } +} + +// ReportSpansPerMethod returns a summary of what spans are being stored for each span name. +func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary { + out := make(map[string]internal.PerMethodSummary) + ssmu.RLock() + defer ssmu.RUnlock() + for name, s := range spanStores { + s.mu.Lock() + p := internal.PerMethodSummary{ + Active: len(s.active), + } + for code, b := range s.errors { + p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{ + ErrorCode: code, + Size: b.size(), + }) + } + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{ + MinLatency: min, + MaxLatency: max, + Size: b.size(), + }) + } + s.mu.Unlock() + out[name] = p + } + return out +} + +// ReportSpansByLatency returns a sample of successful spans. +// +// minLatency is the minimum latency of spans to be returned. +// maxLatency, if nonzero, is the maximum latency of spans to be returned. 
+func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + if i+1 != len(s.latency) && max <= minLatency { + continue + } + if maxLatency != 0 && maxLatency < min { + continue + } + for _, sd := range b.buffer { + if sd == nil { + break + } + if minLatency != 0 || maxLatency != 0 { + d := sd.EndTime.Sub(sd.StartTime) + if d < minLatency { + continue + } + if maxLatency != 0 && d > maxLatency { + continue + } + } + out = append(out, sd) + } + } + return out +} + +// spanStore keeps track of spans stored for a particular span name. +// +// It contains all active spans; a sample of spans for failed requests, +// categorized by error code; and a sample of spans for successful requests, +// bucketed by latency. +type spanStore struct { + mu sync.Mutex // protects everything below. + active map[*Span]struct{} + errors map[int32]*bucket + latency []bucket + maxSpansPerErrorBucket int +} + +// newSpanStore creates a span store. +func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { + s := &spanStore{ + active: make(map[*Span]struct{}), + latency: make([]bucket, len(defaultLatencies)+1), + maxSpansPerErrorBucket: errorBucketSize, + } + for i := range s.latency { + s.latency[i] = makeBucket(latencyBucketSize) + } + return s +} + +// spanStoreForName returns the spanStore for the given name. +// +// It returns nil if it doesn't exist. +func spanStoreForName(name string) *spanStore { + var s *spanStore + ssmu.RLock() + s, _ = spanStores[name] + ssmu.RUnlock() + return s +} + +// spanStoreForNameCreateIfNew returns the spanStore for the given name. +// +// It creates it if it didn't exist. 
+func spanStoreForNameCreateIfNew(name string) *spanStore { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + return s + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + return s + } + s = newSpanStore(name, defaultBucketSize, defaultBucketSize) + spanStores[name] = s + return s +} + +// spanStoreSetSize resizes the spanStore for the given name. +// +// It creates it if it didn't exist. +func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + s = newSpanStore(name, latencyBucketSize, errorBucketSize) + spanStores[name] = s +} + +func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { + s.mu.Lock() + for i := range s.latency { + s.latency[i].resize(latencyBucketSize) + } + for _, b := range s.errors { + b.resize(errorBucketSize) + } + s.maxSpansPerErrorBucket = errorBucketSize + s.mu.Unlock() +} + +// add adds a span to the active bucket of the spanStore. +func (s *spanStore) add(span *Span) { + s.mu.Lock() + s.active[span] = struct{}{} + s.mu.Unlock() +} + +// finished removes a span from the active set, and adds a corresponding +// SpanData to a latency or error bucket. 
+func (s *spanStore) finished(span *Span, sd *SpanData) { + latency := sd.EndTime.Sub(sd.StartTime) + if latency < 0 { + latency = 0 + } + code := sd.Status.Code + + s.mu.Lock() + delete(s.active, span) + if code == 0 { + s.latency[latencyBucket(latency)].add(sd) + } else { + if s.errors == nil { + s.errors = make(map[int32]*bucket) + } + if b := s.errors[code]; b != nil { + b.add(sd) + } else { + b := makeBucket(s.maxSpansPerErrorBucket) + s.errors[code] = &b + b.add(sd) + } + } + s.mu.Unlock() +} diff --git a/vendor/go.opencensus.io/trace/status_codes.go b/vendor/go.opencensus.io/trace/status_codes.go new file mode 100644 index 000000000000..ec60effd1088 --- /dev/null +++ b/vendor/go.opencensus.io/trace/status_codes.go @@ -0,0 +1,37 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +// Status codes for use with Span.SetStatus. 
These correspond to the status
+// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
+const (
+	StatusCodeOK                 = 0
+	StatusCodeCancelled          = 1
+	StatusCodeUnknown            = 2
+	StatusCodeInvalidArgument    = 3
+	StatusCodeDeadlineExceeded   = 4
+	StatusCodeNotFound           = 5
+	StatusCodeAlreadyExists      = 6
+	StatusCodePermissionDenied   = 7
+	StatusCodeResourceExhausted  = 8
+	StatusCodeFailedPrecondition = 9
+	StatusCodeAborted            = 10
+	StatusCodeOutOfRange         = 11
+	StatusCodeUnimplemented      = 12
+	StatusCodeInternal           = 13
+	StatusCodeUnavailable        = 14
+	StatusCodeDataLoss           = 15
+	StatusCodeUnauthenticated    = 16
+)
diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go
new file mode 100644
index 000000000000..3f8977b41b4b
--- /dev/null
+++ b/vendor/go.opencensus.io/trace/trace.go
@@ -0,0 +1,598 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"context"
+	crand "crypto/rand"
+	"encoding/binary"
+	"fmt"
+	"math/rand"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"go.opencensus.io/internal"
+	"go.opencensus.io/trace/tracestate"
+)
+
+// Span represents a span of a trace. It has an associated SpanContext, and
+// stores data accumulated while the span is active.
+//
+// Ideally users should interact with Spans by calling the functions in this
+// package that take a Context parameter.
+type Span struct { + // data contains information recorded about the span. + // + // It will be non-nil if we are exporting the span or recording events for it. + // Otherwise, data is nil, and the Span is simply a carrier for the + // SpanContext, so that the trace ID is propagated. + data *SpanData + mu sync.Mutex // protects the contents of *data (but not the pointer value.) + spanContext SpanContext + + // lruAttributes are capped at configured limit. When the capacity is reached an oldest entry + // is removed to create room for a new entry. + lruAttributes *lruMap + + // annotations are stored in FIFO queue capped by configured limit. + annotations *evictedQueue + + // messageEvents are stored in FIFO queue capped by configured limit. + messageEvents *evictedQueue + + // links are stored in FIFO queue capped by configured limit. + links *evictedQueue + + // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. + *spanStore + endOnce sync.Once + + executionTracerTaskEnd func() // ends the execution tracer span +} + +// IsRecordingEvents returns true if events are being recorded for this span. +// Use this check to avoid computing expensive annotations when they will never +// be used. +func (s *Span) IsRecordingEvents() bool { + if s == nil { + return false + } + return s.data != nil +} + +// TraceOptions contains options associated with a trace span. +type TraceOptions uint32 + +// IsSampled returns true if the span will be exported. +func (sc SpanContext) IsSampled() bool { + return sc.TraceOptions.IsSampled() +} + +// setIsSampled sets the TraceOptions bit that determines whether the span will be exported. +func (sc *SpanContext) setIsSampled(sampled bool) { + if sampled { + sc.TraceOptions |= 1 + } else { + sc.TraceOptions &= ^TraceOptions(1) + } +} + +// IsSampled returns true if the span will be exported. 
+func (t TraceOptions) IsSampled() bool {
+	return t&1 == 1
+}
+
+// SpanContext contains the state that must propagate across process boundaries.
+//
+// SpanContext is not an implementation of context.Context.
+// TODO: add reference to external Census docs for SpanContext.
+type SpanContext struct {
+	TraceID      TraceID
+	SpanID       SpanID
+	TraceOptions TraceOptions
+	Tracestate   *tracestate.Tracestate
+}
+
+type contextKey struct{}
+
+// FromContext returns the Span stored in a context, or nil if there isn't one.
+func FromContext(ctx context.Context) *Span {
+	s, _ := ctx.Value(contextKey{}).(*Span)
+	return s
+}
+
+// NewContext returns a new context with the given Span attached.
+func NewContext(parent context.Context, s *Span) context.Context {
+	return context.WithValue(parent, contextKey{}, s)
+}
+
+// All available span kinds. Span kind must be either one of these values.
+const (
+	SpanKindUnspecified = iota
+	SpanKindServer
+	SpanKindClient
+)
+
+// StartOptions contains options concerning how a span is started.
+type StartOptions struct {
+	// Sampler to consult for this Span. If provided, it is always consulted.
+	//
+	// If not provided, then the behavior differs based on whether
+	// the parent of this Span is remote, local, or there is no parent.
+	// In the case of a remote parent or no parent, the
+	// default sampler (see Config) will be consulted. Otherwise,
+	// when there is a non-remote parent, no new sampling decision will be made:
+	// we will preserve the sampling of the parent.
+	Sampler Sampler
+
+	// SpanKind represents the kind of a span. If none is set,
+	// SpanKindUnspecified is used.
+	SpanKind int
+}
+
+// StartOption apply changes to StartOptions.
+type StartOption func(*StartOptions)
+
+// WithSpanKind makes new spans to be created with the given kind.
+func WithSpanKind(spanKind int) StartOption {
+	return func(o *StartOptions) {
+		o.SpanKind = spanKind
+	}
+}
+
+// WithSampler makes new spans to be created with a custom sampler.
+// Otherwise, the global sampler is used.
+func WithSampler(sampler Sampler) StartOption {
+	return func(o *StartOptions) {
+		o.Sampler = sampler
+	}
+}
+
+// StartSpan starts a new child span of the current span in the context. If
+// there is no span in the context, creates a new trace and span.
+//
+// Returned context contains the newly created span. You can use it to
+// propagate the returned span in process.
+func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
+	var opts StartOptions
+	var parent SpanContext
+	if p := FromContext(ctx); p != nil {
+		p.addChild()
+		parent = p.spanContext
+	}
+	for _, op := range o {
+		op(&opts)
+	}
+	span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts)
+
+	ctx, end := startExecutionTracerTask(ctx, name)
+	span.executionTracerTaskEnd = end
+	return NewContext(ctx, span), span
+}
+
+// StartSpanWithRemoteParent starts a new child span of the span from the given parent.
+//
+// If the incoming context contains a parent, it is ignored. StartSpanWithRemoteParent is
+// preferred for cases where the parent is propagated via an incoming request.
+//
+// Returned context contains the newly created span. You can use it to
+// propagate the returned span in process.
+func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { + var opts StartOptions + for _, op := range o { + op(&opts) + } + span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) + ctx, end := startExecutionTracerTask(ctx, name) + span.executionTracerTaskEnd = end + return NewContext(ctx, span), span +} + +func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span { + span := &Span{} + span.spanContext = parent + + cfg := config.Load().(*Config) + + if !hasParent { + span.spanContext.TraceID = cfg.IDGenerator.NewTraceID() + } + span.spanContext.SpanID = cfg.IDGenerator.NewSpanID() + sampler := cfg.DefaultSampler + + if !hasParent || remoteParent || o.Sampler != nil { + // If this span is the child of a local span and no Sampler is set in the + // options, keep the parent's TraceOptions. + // + // Otherwise, consult the Sampler in the options if it is non-nil, otherwise + // the default sampler. 
+ if o.Sampler != nil { + sampler = o.Sampler + } + span.spanContext.setIsSampled(sampler(SamplingParameters{ + ParentContext: parent, + TraceID: span.spanContext.TraceID, + SpanID: span.spanContext.SpanID, + Name: name, + HasRemoteParent: remoteParent}).Sample) + } + + if !internal.LocalSpanStoreEnabled && !span.spanContext.IsSampled() { + return span + } + + span.data = &SpanData{ + SpanContext: span.spanContext, + StartTime: time.Now(), + SpanKind: o.SpanKind, + Name: name, + HasRemoteParent: remoteParent, + } + span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) + span.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) + span.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) + span.links = newEvictedQueue(cfg.MaxLinksPerSpan) + + if hasParent { + span.data.ParentSpanID = parent.SpanID + } + if internal.LocalSpanStoreEnabled { + var ss *spanStore + ss = spanStoreForNameCreateIfNew(name) + if ss != nil { + span.spanStore = ss + ss.add(span) + } + } + + return span +} + +// End ends the span. +func (s *Span) End() { + if s == nil { + return + } + if s.executionTracerTaskEnd != nil { + s.executionTracerTaskEnd() + } + if !s.IsRecordingEvents() { + return + } + s.endOnce.Do(func() { + exp, _ := exporters.Load().(exportersMap) + mustExport := s.spanContext.IsSampled() && len(exp) > 0 + if s.spanStore != nil || mustExport { + sd := s.makeSpanData() + sd.EndTime = internal.MonotonicEndTime(sd.StartTime) + if s.spanStore != nil { + s.spanStore.finished(s, sd) + } + if mustExport { + for e := range exp { + e.ExportSpan(sd) + } + } + } + }) +} + +// makeSpanData produces a SpanData representing the current state of the Span. +// It requires that s.data is non-nil. 
func (s *Span) makeSpanData() *SpanData {
	var sd SpanData
	s.mu.Lock()
	// Shallow-copy the recording state under the lock, then attach typed
	// snapshots of the bounded containers so the returned SpanData is safe to
	// use after the lock is released.
	sd = *s.data
	if s.lruAttributes.len() > 0 {
		sd.Attributes = s.lruAttributesToAttributeMap()
		sd.DroppedAttributeCount = s.lruAttributes.droppedCount
	}
	if len(s.annotations.queue) > 0 {
		sd.Annotations = s.interfaceArrayToAnnotationArray()
		sd.DroppedAnnotationCount = s.annotations.droppedCount
	}
	if len(s.messageEvents.queue) > 0 {
		sd.MessageEvents = s.interfaceArrayToMessageEventArray()
		sd.DroppedMessageEventCount = s.messageEvents.droppedCount
	}
	if len(s.links.queue) > 0 {
		sd.Links = s.interfaceArrayToLinksArray()
		sd.DroppedLinkCount = s.links.droppedCount
	}
	s.mu.Unlock()
	return &sd
}

// SpanContext returns the SpanContext of the span.
func (s *Span) SpanContext() SpanContext {
	if s == nil {
		return SpanContext{}
	}
	return s.spanContext
}

// SetName sets the name of the span, if it is recording events.
func (s *Span) SetName(name string) {
	if !s.IsRecordingEvents() {
		return
	}
	s.mu.Lock()
	s.data.Name = name
	s.mu.Unlock()
}

// SetStatus sets the status of the span, if it is recording events.
+func (s *Span) SetStatus(status Status) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Status = status + s.mu.Unlock() +} + +func (s *Span) interfaceArrayToLinksArray() []Link { + linksArr := make([]Link, 0) + for _, value := range s.links.queue { + linksArr = append(linksArr, value.(Link)) + } + return linksArr +} + +func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { + messageEventArr := make([]MessageEvent, 0) + for _, value := range s.messageEvents.queue { + messageEventArr = append(messageEventArr, value.(MessageEvent)) + } + return messageEventArr +} + +func (s *Span) interfaceArrayToAnnotationArray() []Annotation { + annotationArr := make([]Annotation, 0) + for _, value := range s.annotations.queue { + annotationArr = append(annotationArr, value.(Annotation)) + } + return annotationArr +} + +func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { + attributes := make(map[string]interface{}) + for _, key := range s.lruAttributes.keys() { + value, ok := s.lruAttributes.get(key) + if ok { + keyStr := key.(string) + attributes[keyStr] = value + } + } + return attributes +} + +func (s *Span) copyToCappedAttributes(attributes []Attribute) { + for _, a := range attributes { + s.lruAttributes.add(a.key, a.value) + } +} + +func (s *Span) addChild() { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.ChildSpanCount++ + s.mu.Unlock() +} + +// AddAttributes sets attributes in the span. +// +// Existing attributes whose keys appear in the attributes parameter are overwritten. +func (s *Span) AddAttributes(attributes ...Attribute) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.copyToCappedAttributes(attributes) + s.mu.Unlock() +} + +// copyAttributes copies a slice of Attributes into a map. 
// copyAttributes copies a slice of Attributes into a map.
func copyAttributes(m map[string]interface{}, attributes []Attribute) {
	for _, a := range attributes {
		m[a.key] = a.value
	}
}

// lazyPrintfInternal records a formatted annotation. Note the message is
// formatted eagerly (before the lock) despite the name; only the queue insert
// happens under s.mu.
func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...interface{}) {
	now := time.Now()
	msg := fmt.Sprintf(format, a...)
	var m map[string]interface{}
	s.mu.Lock()
	// Leave the attribute map nil when there are no attributes so empty
	// annotations carry no allocation.
	if len(attributes) != 0 {
		m = make(map[string]interface{})
		copyAttributes(m, attributes)
	}
	s.annotations.add(Annotation{
		Time:       now,
		Message:    msg,
		Attributes: m,
	})
	s.mu.Unlock()
}

// printStringInternal records a plain-string annotation under s.mu.
func (s *Span) printStringInternal(attributes []Attribute, str string) {
	now := time.Now()
	var a map[string]interface{}
	s.mu.Lock()
	// As above: nil map when there are no attributes.
	if len(attributes) != 0 {
		a = make(map[string]interface{})
		copyAttributes(a, attributes)
	}
	s.annotations.add(Annotation{
		Time:       now,
		Message:    str,
		Attributes: a,
	})
	s.mu.Unlock()
}

// Annotate adds an annotation with attributes.
// Attributes can be nil.
func (s *Span) Annotate(attributes []Attribute, str string) {
	if !s.IsRecordingEvents() {
		return
	}
	s.printStringInternal(attributes, str)
}

// Annotatef adds an annotation with attributes.
func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) {
	if !s.IsRecordingEvents() {
		return
	}
	s.lazyPrintfInternal(attributes, format, a...)
}

// AddMessageSendEvent adds a message send event to the span.
//
// messageID is an identifier for the message, which is recommended to be
// unique in this span and the same between the send event and the receive
// event (this allows to identify a message between the sender and receiver).
// For example, this could be a sequence id.
func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
	if !s.IsRecordingEvents() {
		return
	}
	now := time.Now()
	s.mu.Lock()
	s.messageEvents.add(MessageEvent{
		Time:                 now,
		EventType:            MessageEventTypeSent,
		MessageID:            messageID,
		UncompressedByteSize: uncompressedByteSize,
		CompressedByteSize:   compressedByteSize,
	})
	s.mu.Unlock()
}

// AddMessageReceiveEvent adds a message receive event to the span.
//
// messageID is an identifier for the message, which is recommended to be
// unique in this span and the same between the send event and the receive
// event (this allows to identify a message between the sender and receiver).
// For example, this could be a sequence id.
func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
	if !s.IsRecordingEvents() {
		return
	}
	now := time.Now()
	s.mu.Lock()
	s.messageEvents.add(MessageEvent{
		Time:                 now,
		EventType:            MessageEventTypeRecv,
		MessageID:            messageID,
		UncompressedByteSize: uncompressedByteSize,
		CompressedByteSize:   compressedByteSize,
	})
	s.mu.Unlock()
}

// AddLink adds a link to the span.
func (s *Span) AddLink(l Link) {
	if !s.IsRecordingEvents() {
		return
	}
	s.mu.Lock()
	s.links.add(l)
	s.mu.Unlock()
}

// String returns a short debug description of the span.
func (s *Span) String() string {
	if s == nil {
		return ""
	}
	// NOTE(review): s.data is read here without holding s.mu; presumably any
	// race is tolerated because this is a debug string — confirm before
	// relying on it for anything else.
	if s.data == nil {
		return fmt.Sprintf("span %s", s.spanContext.SpanID)
	}
	s.mu.Lock()
	str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name)
	s.mu.Unlock()
	return str
}

var config atomic.Value // access atomically

func init() {
	gen := &defaultIDGenerator{}
	// initialize traceID and spanID generators.
	var rngSeed int64
	for _, p := range []interface{}{
		&rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc,
	} {
		// NOTE(review): the error from binary.Read is ignored; a failed
		// crypto/rand read would leave a zeroed seed/constant. Presumably
		// acceptable for ID generation — confirm upstream intent.
		binary.Read(crand.Reader, binary.LittleEndian, p)
	}
	gen.traceIDRand = rand.New(rand.NewSource(rngSeed))
	// Force the span-ID increment to be odd so the additive sequence cycles
	// through all 2^64 values before repeating.
	gen.spanIDInc |= 1

	config.Store(&Config{
		DefaultSampler:             ProbabilitySampler(defaultSamplingProbability),
		IDGenerator:                gen,
		MaxAttributesPerSpan:       DefaultMaxAttributesPerSpan,
		MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan,
		MaxMessageEventsPerSpan:    DefaultMaxMessageEventsPerSpan,
		MaxLinksPerSpan:            DefaultMaxLinksPerSpan,
	})
}

type defaultIDGenerator struct {
	sync.Mutex

	// Please keep these as the first fields
	// so that these 8 byte fields will be aligned on addresses
	// divisible by 8, on both 32-bit and 64-bit machines when
	// performing atomic increments and accesses.
	// See:
	// * https://github.com/census-instrumentation/opencensus-go/issues/587
	// * https://github.com/census-instrumentation/opencensus-go/issues/865
	// * https://golang.org/pkg/sync/atomic/#pkg-note-BUG
	nextSpanID uint64
	spanIDInc  uint64

	traceIDAdd  [2]uint64
	traceIDRand *rand.Rand
}

// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
func (gen *defaultIDGenerator) NewSpanID() [8]byte {
	var id uint64
	// Retry until non-zero: the all-zero span ID is invalid.
	for id == 0 {
		id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc)
	}
	var sid [8]byte
	binary.LittleEndian.PutUint64(sid[:], id)
	return sid
}

// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence.
// It acquires gen's mutex internally to serialize access to traceIDRand,
// which is not safe for concurrent use.
func (gen *defaultIDGenerator) NewTraceID() [16]byte {
	var tid [16]byte
	// Construct the trace ID from two outputs of traceIDRand, with a constant
	// added to each half for additional entropy.
	gen.Lock()
	binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0])
	binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1])
	gen.Unlock()
	return tid
}
diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go
new file mode 100644
index 000000000000..b7d8aaf28477
--- /dev/null
+++ b/vendor/go.opencensus.io/trace/trace_go11.go
@@ -0,0 +1,32 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build go1.11

package trace

import (
	"context"
	t "runtime/trace"
)

// startExecutionTracerTask links the span to a runtime/trace user task on
// Go 1.11+, returning the task's End func for the span to call on End.
func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) {
	if !t.IsEnabled() {
		// Avoid additional overhead if
		// runtime/trace is not enabled.
		return ctx, func() {}
	}
	nctx, task := t.NewTask(ctx, name)
	return nctx, task.End
}
diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go
new file mode 100644
index 000000000000..e25419859c02
--- /dev/null
+++ b/vendor/go.opencensus.io/trace/trace_nongo11.go
@@ -0,0 +1,25 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !go1.11

package trace

import (
	"context"
)

// startExecutionTracerTask is a no-op stub for Go versions before 1.11,
// where runtime/trace user tasks do not exist. It returns the context
// unchanged and a func() that does nothing.
func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) {
	return ctx, func() {}
}
diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go
new file mode 100644
index 000000000000..2d6c713eb3a1
--- /dev/null
+++ b/vendor/go.opencensus.io/trace/tracestate/tracestate.go
@@ -0,0 +1,147 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package tracestate implements support for the Tracestate header of the
// W3C TraceContext propagation format.
package tracestate

import (
	"fmt"
	"regexp"
)

const (
	keyMaxSize       = 256
	valueMaxSize     = 256
	maxKeyValuePairs = 32
)

const (
	// Key/value grammars per the W3C trace-context tracestate header:
	// a key is either a simple lowercase token or a tenant@vendor pair.
	keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}`
	keyWithVendorFormat    = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}`
	keyFormat              = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)`
	valueFormat            = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
)

// Compiled once at package scope; used by isValid for every entry.
var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`)
var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`)

// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different
// vendors propagate additional information and inter-operate with their legacy Id formats.
type Tracestate struct {
	entries []Entry
}

// Entry represents one key-value pair in a list of key-value pair of Tracestate.
type Entry struct {
	// Key is an opaque string up to 256 characters printable. It MUST begin with a lowercase letter,
	// and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and
	// forward slashes /.
	Key string

	// Value is an opaque string up to 256 characters printable ASCII RFC0020 characters (i.e., the
	// range 0x20 to 0x7E) except comma , and =.
	Value string
}

// Entries returns a slice of Entry.
func (ts *Tracestate) Entries() []Entry {
	if ts == nil {
		return nil
	}
	return ts.entries
}

// remove deletes the entry with the given key, if present, and returns a
// pointer to a copy of the removed entry (the loop variable; safe because we
// return immediately), or nil if the key was not found.
func (ts *Tracestate) remove(key string) *Entry {
	for index, entry := range ts.entries {
		if entry.Key == key {
			ts.entries = append(ts.entries[:index], ts.entries[index+1:]...)
			return &entry
		}
	}
	return nil
}

// add prepends entries to the list, first removing any existing entries with
// the same keys, and enforces the maxKeyValuePairs limit after deduplication.
func (ts *Tracestate) add(entries []Entry) error {
	for _, entry := range entries {
		ts.remove(entry.Key)
	}
	if len(ts.entries)+len(entries) > maxKeyValuePairs {
		return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d",
			len(entries), len(ts.entries), maxKeyValuePairs)
	}
	// New entries go in front of the existing ones.
	ts.entries = append(entries, ts.entries...)
	return nil
}

// isValid reports whether the entry's key and value both match the
// tracestate grammar.
func isValid(entry Entry) bool {
	return keyValidationRegExp.MatchString(entry.Key) &&
		valueValidationRegExp.MatchString(entry.Value)
}

// containsDuplicateKey returns the first repeated key and true, or ("", false)
// when all keys are distinct.
func containsDuplicateKey(entries ...Entry) (string, bool) {
	keyMap := make(map[string]int)
	for _, entry := range entries {
		if _, ok := keyMap[entry.Key]; ok {
			return entry.Key, true
		}
		keyMap[entry.Key] = 1
	}
	return "", false
}

// areEntriesValid returns (nil, true) when every entry is valid, otherwise a
// pointer to a copy of the first invalid entry and false.
func areEntriesValid(entries ...Entry) (*Entry, bool) {
	for _, entry := range entries {
		if !isValid(entry) {
			return &entry, false
		}
	}
	return nil, true
}

// New creates a Tracestate object from a parent and/or entries (key-value pair).
// Entries from the parent are copied if present. The entries passed to this function
// are inserted in front of those copied from the parent. If an entry copied from the
// parent contains the same key as one of the entry in entries then the entry copied
// from the parent is removed. See add func.
//
// An error is returned with nil Tracestate if
// 1. one or more entry in entries is invalid.
// 2. two or more entries in the input entries have the same key.
// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs.
//    (duplicate entry is counted only once).
+func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) { + if parent == nil && len(entries) == 0 { + return nil, nil + } + if entry, ok := areEntriesValid(entries...); !ok { + return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value) + } + + if key, duplicate := containsDuplicateKey(entries...); duplicate { + return nil, fmt.Errorf("contains duplicate keys (%s)", key) + } + + tracestate := Tracestate{} + + if parent != nil && len(parent.entries) > 0 { + tracestate.entries = append([]Entry{}, parent.entries...) + } + + err := tracestate.add(entries) + if err != nil { + return nil, err + } + return &tracestate, nil +}